author    Stephen Rothwell <sfr@canb.auug.org.au>  2016-12-09 13:37:37 +1100
committer Stephen Rothwell <sfr@canb.auug.org.au>  2016-12-09 13:37:41 +1100
commit    5a889d3c2325b50f287f25a7729bf1bb85dd5aed
tree      dd7768e394d35b43674d2bc963aff02e9aa4af26
parent    6b523f8b122d255415ce39eea563eb3e92e420b1
parent    d5b49bf3759103772af20d01310e39ccd564f37c
Merge remote-tracking branch 'tip/auto-latest'
-rw-r--r--CREDITS4
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu16
-rw-r--r--Documentation/RCU/Design/Requirements/Requirements.html25
-rw-r--r--Documentation/RCU/whatisRCU.txt2
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt24
-rw-r--r--Documentation/devicetree/bindings/arm/arch_timer.txt5
-rw-r--r--Documentation/kbuild/kconfig-language.txt29
-rw-r--r--Documentation/sysctl/kernel.txt8
-rw-r--r--Documentation/trace/ftrace.txt20
-rw-r--r--Documentation/virtual/kvm/msr.txt9
-rw-r--r--Documentation/x86/intel_rdt_ui.txt214
-rw-r--r--Documentation/x86/x86_64/boot-options.txt4
-rw-r--r--MAINTAINERS20
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/include/asm/mutex.h9
-rw-r--r--arch/alpha/include/asm/processor.h1
-rw-r--r--arch/alpha/kernel/osf_sys.c8
-rw-r--r--arch/arc/include/asm/mutex.h18
-rw-r--r--arch/arc/include/asm/processor.h3
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/common/bL_switcher.c34
-rw-r--r--arch/arm/include/asm/arch_gicv3.h54
-rw-r--r--arch/arm/include/asm/efi.h3
-rw-r--r--arch/arm/include/asm/mutex.h21
-rw-r--r--arch/arm/include/asm/processor.h2
-rw-r--r--arch/arm/kernel/hw_breakpoint.c47
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399.dtsi1
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h61
-rw-r--r--arch/arm64/include/asm/efi.h3
-rw-r--r--arch/arm64/include/asm/processor.h2
-rw-r--r--arch/arm64/kernel/cpuinfo.c36
-rw-r--r--arch/avr32/include/asm/mutex.h9
-rw-r--r--arch/avr32/include/asm/processor.h1
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/blackfin/include/asm/processor.h1
-rw-r--r--arch/c6x/include/asm/mutex.h6
-rw-r--r--arch/c6x/include/asm/processor.h1
-rw-r--r--arch/cris/include/asm/mutex.h9
-rw-r--r--arch/cris/include/asm/processor.h1
-rw-r--r--arch/frv/include/asm/mutex.h9
-rw-r--r--arch/frv/include/asm/processor.h1
-rw-r--r--arch/h8300/include/asm/mutex.h9
-rw-r--r--arch/h8300/include/asm/processor.h1
-rw-r--r--arch/hexagon/include/asm/mutex.h8
-rw-r--r--arch/hexagon/include/asm/processor.h1
-rw-r--r--arch/ia64/include/asm/mutex.h90
-rw-r--r--arch/ia64/include/asm/processor.h1
-rw-r--r--arch/ia64/kernel/err_inject.c74
-rw-r--r--arch/ia64/kernel/palinfo.c60
-rw-r--r--arch/ia64/kernel/salinfo.c83
-rw-r--r--arch/ia64/kernel/time.c4
-rw-r--r--arch/ia64/kernel/topology.c54
-rw-r--r--arch/m32r/include/asm/mutex.h9
-rw-r--r--arch/m32r/include/asm/processor.h1
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/m68k/include/asm/processor.h1
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/metag/include/asm/processor.h1
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/irq.h2
-rw-r--r--arch/microblaze/include/asm/mutex.h1
-rw-r--r--arch/microblaze/include/asm/processor.h1
-rw-r--r--arch/microblaze/kernel/Makefile2
-rw-r--r--arch/microblaze/kernel/intc.c196
-rw-r--r--arch/microblaze/kernel/irq.c4
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/processor.h1
-rw-r--r--arch/mn10300/include/asm/mutex.h16
-rw-r--r--arch/mn10300/include/asm/processor.h1
-rw-r--r--arch/nios2/include/asm/mutex.h1
-rw-r--r--arch/nios2/include/asm/processor.h1
-rw-r--r--arch/openrisc/include/asm/mutex.h27
-rw-r--r--arch/openrisc/include/asm/processor.h1
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/processor.h1
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/include/asm/cputime.h14
-rw-r--r--arch/powerpc/include/asm/mutex.h132
-rw-r--r--arch/powerpc/include/asm/processor.h2
-rw-r--r--arch/powerpc/include/asm/spinlock.h8
-rw-r--r--arch/powerpc/include/asm/xilinx_intc.h2
-rw-r--r--arch/powerpc/kernel/sysfs.c50
-rw-r--r--arch/powerpc/kernel/time.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv.c48
-rw-r--r--arch/powerpc/platforms/40x/Kconfig1
-rw-r--r--arch/powerpc/platforms/40x/virtex.c2
-rw-r--r--arch/powerpc/platforms/44x/Kconfig1
-rw-r--r--arch/powerpc/platforms/44x/virtex.c2
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c211
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/mutex.h9
-rw-r--r--arch/s390/include/asm/processor.h5
-rw-r--r--arch/s390/include/asm/spinlock.h8
-rw-r--r--arch/s390/kernel/processor.c4
-rw-r--r--arch/s390/kernel/smp.c45
-rw-r--r--arch/s390/kernel/vtime.c9
-rw-r--r--arch/s390/lib/spinlock.c25
-rw-r--r--arch/score/include/asm/mutex.h6
-rw-r--r--arch/score/include/asm/processor.h1
-rw-r--r--arch/sh/include/asm/mutex-llsc.h109
-rw-r--r--arch/sh/include/asm/mutex.h12
-rw-r--r--arch/sh/include/asm/processor.h1
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/processor_32.h1
-rw-r--r--arch/sparc/include/asm/processor_64.h1
-rw-r--r--arch/sparc/kernel/sysfs.c45
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/processor.h2
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/asm/mutex.h20
-rw-r--r--arch/unicore32/include/asm/processor.h1
-rw-r--r--arch/x86/Kconfig104
-rw-r--r--arch/x86/boot/Makefile2
-rw-r--r--arch/x86/boot/compressed/eboot.c65
-rw-r--r--arch/x86/boot/compressed/head_64.S3
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c22
-rw-r--r--arch/x86/entry/calling.h33
-rw-r--r--arch/x86/entry/entry_32.S141
-rw-r--r--arch/x86/entry/entry_64.S16
-rw-r--r--arch/x86/entry/vdso/vma.c10
-rw-r--r--arch/x86/events/intel/cqm.c23
-rw-r--r--arch/x86/events/intel/pt.c45
-rw-r--r--arch/x86/include/asm/Kbuild4
-rw-r--r--arch/x86/include/asm/amd_nb.h23
-rw-r--r--arch/x86/include/asm/apic.h4
-rw-r--r--arch/x86/include/asm/cpufeatures.h10
-rw-r--r--arch/x86/include/asm/efi.h16
-rw-r--r--arch/x86/include/asm/fpu/api.h10
-rw-r--r--arch/x86/include/asm/fpu/internal.h139
-rw-r--r--arch/x86/include/asm/fpu/types.h34
-rw-r--r--arch/x86/include/asm/fpu/xstate.h17
-rw-r--r--arch/x86/include/asm/idle.h16
-rw-r--r--arch/x86/include/asm/intel_rdt.h224
-rw-r--r--arch/x86/include/asm/intel_rdt_common.h27
-rw-r--r--arch/x86/include/asm/kdebug.h1
-rw-r--r--arch/x86/include/asm/lguest_hcall.h1
-rw-r--r--arch/x86/include/asm/mce.h31
-rw-r--r--arch/x86/include/asm/microcode.h18
-rw-r--r--arch/x86/include/asm/microcode_amd.h30
-rw-r--r--arch/x86/include/asm/microcode_intel.h4
-rw-r--r--arch/x86/include/asm/msr-index.h4
-rw-r--r--arch/x86/include/asm/msr.h46
-rw-r--r--arch/x86/include/asm/mutex.h5
-rw-r--r--arch/x86/include/asm/mutex_32.h110
-rw-r--r--arch/x86/include/asm/mutex_64.h127
-rw-r--r--arch/x86/include/asm/paravirt.h10
-rw-r--r--arch/x86/include/asm/paravirt_types.h4
-rw-r--r--arch/x86/include/asm/percpu.h11
-rw-r--r--arch/x86/include/asm/preempt.h8
-rw-r--r--arch/x86/include/asm/processor.h16
-rw-r--r--arch/x86/include/asm/qspinlock.h6
-rw-r--r--arch/x86/include/asm/special_insns.h13
-rw-r--r--arch/x86/include/asm/stacktrace.h8
-rw-r--r--arch/x86/include/asm/topology.h32
-rw-r--r--arch/x86/include/asm/trace/fpu.h5
-rw-r--r--arch/x86/include/asm/tsc.h9
-rw-r--r--arch/x86/include/asm/uaccess.h13
-rw-r--r--arch/x86/include/asm/unwind.h16
-rw-r--r--arch/x86/include/asm/vgtod.h7
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h1
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h4
-rw-r--r--arch/x86/include/uapi/asm/mce.h1
-rw-r--r--arch/x86/include/uapi/asm/prctl.h8
-rw-r--r--arch/x86/kernel/Makefile3
-rw-r--r--arch/x86/kernel/acpi/boot.c2
-rw-r--r--arch/x86/kernel/amd_nb.c164
-rw-r--r--arch/x86/kernel/apic/apic.c1
-rw-r--r--arch/x86/kernel/apm_32.c4
-rw-r--r--arch/x86/kernel/cpu/Makefile6
-rw-r--r--arch/x86/kernel/cpu/amd.c31
-rw-r--r--arch/x86/kernel/cpu/bugs.c26
-rw-r--r--arch/x86/kernel/cpu/bugs_64.c33
-rw-r--r--arch/x86/kernel/cpu/common.c60
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c20
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c403
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_rdtgroup.c1107
-rw-r--r--arch/x86/kernel/cpu/intel_rdt_schemata.c245
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c200
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c356
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c41
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c56
-rw-r--r--arch/x86/kernel/cpu/microcode/Makefile2
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c421
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c83
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c834
-rw-r--r--arch/x86/kernel/cpu/microcode/intel_lib.c184
-rw-r--r--arch/x86/kernel/cpu/scattered.c60
-rw-r--r--arch/x86/kernel/cpu/vmware.c86
-rw-r--r--arch/x86/kernel/cpuid.c73
-rw-r--r--arch/x86/kernel/dumpstack.c68
-rw-r--r--arch/x86/kernel/dumpstack_32.c56
-rw-r--r--arch/x86/kernel/dumpstack_64.c79
-rw-r--r--arch/x86/kernel/fpu/bugs.c7
-rw-r--r--arch/x86/kernel/fpu/core.c74
-rw-r--r--arch/x86/kernel/fpu/init.c107
-rw-r--r--arch/x86/kernel/fpu/signal.c8
-rw-r--r--arch/x86/kernel/fpu/xstate.c11
-rw-r--r--arch/x86/kernel/head_32.S49
-rw-r--r--arch/x86/kernel/head_64.S60
-rw-r--r--arch/x86/kernel/itmt.c215
-rw-r--r--arch/x86/kernel/kvm.c19
-rw-r--r--arch/x86/kernel/msr.c69
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c14
-rw-r--r--arch/x86/kernel/paravirt.c1
-rw-r--r--arch/x86/kernel/paravirt_patch_32.c10
-rw-r--r--arch/x86/kernel/paravirt_patch_64.c10
-rw-r--r--arch/x86/kernel/process.c48
-rw-r--r--arch/x86/kernel/process_32.c16
-rw-r--r--arch/x86/kernel/process_64.c22
-rw-r--r--arch/x86/kernel/rtc.c9
-rw-r--r--arch/x86/kernel/setup_percpu.c3
-rw-r--r--arch/x86/kernel/smp.c2
-rw-r--r--arch/x86/kernel/smpboot.c55
-rw-r--r--arch/x86/kernel/traps.c20
-rw-r--r--arch/x86/kernel/tsc.c36
-rw-r--r--arch/x86/kernel/tsc_msr.c19
-rw-r--r--arch/x86/kernel/tsc_sync.c255
-rw-r--r--arch/x86/kernel/unwind_frame.c161
-rw-r--r--arch/x86/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/kvm/cpuid.c4
-rw-r--r--arch/x86/kvm/vmx.c12
-rw-r--r--arch/x86/kvm/x86.c35
-rw-r--r--arch/x86/lguest/boot.c29
-rw-r--r--arch/x86/lib/copy_user_64.S47
-rw-r--r--arch/x86/lib/msr.c4
-rw-r--r--arch/x86/lib/usercopy.c49
-rw-r--r--arch/x86/lib/usercopy_32.c49
-rw-r--r--arch/x86/mm/fault.c3
-rw-r--r--arch/x86/mm/pat.c7
-rw-r--r--arch/x86/mm/pkeys.c3
-rw-r--r--arch/x86/oprofile/nmi_int.c71
-rw-r--r--arch/x86/pci/amd_bus.c34
-rw-r--r--arch/x86/platform/intel-mid/mfld.c9
-rw-r--r--arch/x86/platform/intel-mid/mrfld.c8
-rw-r--r--arch/x86/platform/intel-mid/pwr.c1
-rw-r--r--arch/x86/platform/uv/uv_nmi.c4
-rw-r--r--arch/x86/ras/mce_amd_inj.c2
-rw-r--r--arch/x86/realmode/rm/Makefile2
-rw-r--r--arch/x86/tools/insn_sanity.c3
-rw-r--r--arch/x86/tools/test_get_len.c2
-rw-r--r--arch/x86/um/asm/processor.h1
-rw-r--r--arch/x86/xen/enlighten.c13
-rw-r--r--arch/x86/xen/spinlock.c2
-rw-r--r--arch/xtensa/include/asm/mutex.h9
-rw-r--r--arch/xtensa/include/asm/processor.h1
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/bus.c10
-rw-r--r--drivers/base/cacheinfo.c62
-rw-r--r--drivers/base/power/trace.c27
-rw-r--r--drivers/base/topology.c42
-rw-r--r--drivers/block/zram/zcomp.c76
-rw-r--r--drivers/block/zram/zcomp.h5
-rw-r--r--drivers/block/zram/zram_drv.c9
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/hw_random/via-rng.c8
-rw-r--r--drivers/clocksource/arm_arch_timer.c14
-rw-r--r--drivers/clocksource/bcm2835_timer.c14
-rw-r--r--drivers/cpufreq/Kconfig.x861
-rw-r--r--drivers/cpufreq/intel_pstate.c56
-rw-r--r--drivers/crypto/padlock-aes.c23
-rw-r--r--drivers/crypto/padlock-sha.c18
-rw-r--r--drivers/dma/ioat/registers.h2
-rw-r--r--drivers/edac/mce_amd.c12
-rw-r--r--drivers/firmware/efi/Kconfig18
-rw-r--r--drivers/firmware/efi/Makefile2
-rw-r--r--drivers/firmware/efi/apple-properties.c248
-rw-r--r--drivers/firmware/efi/arm-init.c4
-rw-r--r--drivers/firmware/efi/dev-path-parser.c203
-rw-r--r--drivers/firmware/efi/efi.c76
-rw-r--r--drivers/firmware/efi/libstub/Makefile4
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c2
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c33
-rw-r--r--drivers/firmware/efi/libstub/efistub.h11
-rw-r--r--drivers/firmware/efi/libstub/random.c67
-rw-r--r--drivers/firmware/efi/test/efi_test.c15
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c27
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c27
-rw-r--r--drivers/idle/Kconfig17
-rw-r--r--drivers/idle/Makefile1
-rw-r--r--drivers/idle/i7300_idle.c612
-rw-r--r--drivers/iommu/intel-iommu.c24
-rw-r--r--drivers/irqchip/Kconfig4
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c26
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c75
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c241
-rw-r--r--drivers/lguest/hypercalls.c4
-rw-r--r--drivers/lguest/lg.h1
-rw-r--r--drivers/lguest/x86/core.c19
-rw-r--r--drivers/misc/lkdtm.h2
-rw-r--r--drivers/misc/lkdtm_bugs.c68
-rw-r--r--drivers/misc/lkdtm_core.c2
-rw-r--r--drivers/net/ethernet/adi/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c6
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig4
-rw-r--r--drivers/net/ethernet/cavium/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/Kconfig10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/samsung/Kconfig2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig2
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/Kconfig2
-rw-r--r--drivers/oprofile/nmi_timer_int.c58
-rw-r--r--drivers/pci/host/pci-xgene-msi.c69
-rw-r--r--drivers/pci/msi.c78
-rw-r--r--drivers/ptp/Kconfig10
-rw-r--r--drivers/rtc/rtc-cmos.c7
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c46
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c39
-rw-r--r--drivers/thunderbolt/Kconfig2
-rw-r--r--drivers/thunderbolt/eeprom.c43
-rw-r--r--drivers/thunderbolt/switch.c2
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/video/fbdev/efifb.c59
-rw-r--r--drivers/watchdog/octeon-wdt-main.c62
-rw-r--r--drivers/xen/events/events_base.c1
-rw-r--r--fs/buffer.c16
-rw-r--r--fs/exec.c2
-rw-r--r--include/asm-generic/cputime_jiffies.h1
-rw-r--r--include/asm-generic/cputime_nsecs.h1
-rw-r--r--include/asm-generic/mutex-dec.h88
-rw-r--r--include/asm-generic/mutex-null.h19
-rw-r--r--include/asm-generic/mutex-xchg.h120
-rw-r--r--include/asm-generic/mutex.h9
-rw-r--r--include/asm-generic/pgtable.h9
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/alarmtimer.h5
-rw-r--r--include/linux/bug.h17
-rw-r--r--include/linux/cacheinfo.h3
-rw-r--r--include/linux/clocksource.h5
-rw-r--r--include/linux/cpuhotplug.h21
-rw-r--r--include/linux/efi.h46
-rw-r--r--include/linux/interrupt.h20
-rw-r--r--include/linux/irqchip/arm-gic-v3.h4
-rw-r--r--include/linux/kernel_stat.h4
-rw-r--r--include/linux/kvm_host.h3
-rw-r--r--include/linux/list.h37
-rw-r--r--include/linux/mc146818rtc.h1
-rw-r--r--include/linux/mutex-debug.h24
-rw-r--r--include/linux/mutex.h77
-rw-r--r--include/linux/pci.h25
-rw-r--r--include/linux/pm-trace.h9
-rw-r--r--include/linux/preempt.h21
-rw-r--r--include/linux/ptp_clock_kernel.h65
-rw-r--r--include/linux/rculist.h8
-rw-r--r--include/linux/ring_buffer.h6
-rw-r--r--include/linux/sched.h112
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/time.h2
-rw-r--r--include/linux/timekeeping.h1
-rw-r--r--include/linux/ww_mutex.h2
-rw-r--r--include/net/flow.h1
-rw-r--r--include/net/flowcache.h2
-rw-r--r--include/trace/events/alarmtimer.h96
-rw-r--r--include/trace/events/rcu.h5
-rw-r--r--include/uapi/linux/magic.h1
-rw-r--r--init/Kconfig17
-rw-r--r--ipc/mqueue.c4
-rw-r--r--ipc/msg.c8
-rw-r--r--kernel/Kconfig.locks2
-rw-r--r--kernel/compat.c8
-rw-r--r--kernel/exit.c15
-rw-r--r--kernel/fork.c4
-rw-r--r--kernel/futex.c8
-rw-r--r--kernel/irq/affinity.c72
-rw-r--r--kernel/irq/msi.c4
-rw-r--r--kernel/locking/lockdep.c10
-rw-r--r--kernel/locking/mcs_spinlock.h4
-rw-r--r--kernel/locking/mutex-debug.c13
-rw-r--r--kernel/locking/mutex-debug.h10
-rw-r--r--kernel/locking/mutex.c588
-rw-r--r--kernel/locking/mutex.h26
-rw-r--r--kernel/locking/osq_lock.c15
-rw-r--r--kernel/locking/qrwlock.c6
-rw-r--r--kernel/locking/rtmutex.c18
-rw-r--r--kernel/locking/rtmutex_common.h3
-rw-r--r--kernel/locking/rwsem-xadd.c28
-rw-r--r--kernel/printk/printk.c25
-rw-r--r--kernel/rcu/rcutorture.c11
-rw-r--r--kernel/rcu/tree.c17
-rw-r--r--kernel/rcu/tree.h1
-rw-r--r--kernel/rcu/tree_exp.h12
-rw-r--r--kernel/sched/core.c36
-rw-r--r--kernel/sched/cpuacct.c2
-rw-r--r--kernel/sched/cputime.c124
-rw-r--r--kernel/sched/deadline.c4
-rw-r--r--kernel/sched/fair.c612
-rw-r--r--kernel/sched/sched.h11
-rw-r--r--kernel/signal.c6
-rw-r--r--kernel/smp.c18
-rw-r--r--kernel/stop_machine.c2
-rw-r--r--kernel/sys.c3
-rw-r--r--kernel/sysctl.c14
-rw-r--r--kernel/time/Makefile10
-rw-r--r--kernel/time/alarmtimer.c59
-rw-r--r--kernel/time/hrtimer.c20
-rw-r--r--kernel/time/itimer.c15
-rw-r--r--kernel/time/posix-cpu-timers.c8
-rw-r--r--kernel/time/posix-stubs.c123
-rw-r--r--kernel/time/tick-sched.c33
-rw-r--r--kernel/time/timekeeping.c29
-rw-r--r--kernel/time/timer.c48
-rw-r--r--kernel/trace/ring_buffer.c137
-rw-r--r--kernel/trace/trace.c16
-rw-r--r--lib/Kconfig.debug15
-rw-r--r--lib/list_debug.c99
-rw-r--r--lib/lockref.c2
-rw-r--r--lib/percpu_counter.c25
-rw-r--r--lib/radix-tree.c25
-rw-r--r--mm/compaction.c31
-rw-r--r--mm/huge_memory.c5
-rw-r--r--mm/memcontrol.c24
-rw-r--r--mm/memory.c8
-rw-r--r--mm/page_alloc.c53
-rw-r--r--mm/vmscan.c28
-rw-r--r--mm/vmstat.c95
-rw-r--r--mm/zsmalloc.c67
-rw-r--r--mm/zswap.c172
-rw-r--r--net/core/dev.c18
-rw-r--r--net/core/flow.c60
-rw-r--r--net/iucv/iucv.c124
-rw-r--r--net/xfrm/xfrm_policy.c1
-rwxr-xr-xscripts/checkpatch.pl6
-rwxr-xr-xscripts/decode_stacktrace.sh3
-rwxr-xr-xscripts/faddr2line33
-rw-r--r--scripts/kconfig/expr.h2
-rw-r--r--scripts/kconfig/menu.c55
-rw-r--r--scripts/kconfig/symbol.c24
-rw-r--r--scripts/kconfig/zconf.gperf1
-rw-r--r--scripts/kconfig/zconf.hash.c_shipped30
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped1581
-rw-r--r--scripts/kconfig/zconf.y16
-rw-r--r--security/selinux/hooks.c11
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h1
-rw-r--r--tools/build/Build.include22
-rw-r--r--tools/build/Documentation/Build.txt6
-rw-r--r--tools/build/Makefile.feature138
-rw-r--r--tools/build/feature/Makefile126
-rw-r--r--tools/build/feature/test-clang.cpp21
-rw-r--r--tools/build/feature/test-jvmti.c13
-rw-r--r--tools/build/feature/test-llvm-version.cpp11
-rw-r--r--tools/build/feature/test-llvm.cpp13
-rw-r--r--tools/build/fixdep.c5
-rw-r--r--tools/include/asm-generic/bitops.h1
-rw-r--r--tools/include/asm-generic/bitops/__ffz.h12
-rw-r--r--tools/include/asm-generic/bitops/find.h28
-rw-r--r--tools/include/linux/bitops.h5
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h5
-rw-r--r--tools/lib/bpf/bpf.c56
-rw-r--r--tools/lib/bpf/bpf.h7
-rw-r--r--tools/lib/bpf/libbpf.c177
-rw-r--r--tools/lib/bpf/libbpf.h13
-rw-r--r--tools/lib/find_bit.c25
-rw-r--r--tools/lib/subcmd/parse-options.c14
-rw-r--r--tools/lib/subcmd/parse-options.h2
-rw-r--r--tools/lib/traceevent/Makefile40
-rw-r--r--tools/lib/traceevent/event-parse.c41
-rw-r--r--tools/lib/traceevent/event-parse.h5
-rw-r--r--tools/perf/Build1
-rw-r--r--tools/perf/Documentation/intel-pt.txt19
-rw-r--r--tools/perf/Documentation/jitdump-specification.txt170
-rw-r--r--tools/perf/Documentation/perf-c2c.txt290
-rw-r--r--tools/perf/Documentation/perf-config.txt35
-rw-r--r--tools/perf/Documentation/perf-kmem.txt7
-rw-r--r--tools/perf/Documentation/perf-record.txt9
-rw-r--r--tools/perf/Documentation/perf-report.txt10
-rw-r--r--tools/perf/Documentation/perf-sched.txt78
-rw-r--r--tools/perf/Documentation/perf-script.txt16
-rw-r--r--tools/perf/Documentation/perf-top.txt1
-rw-r--r--tools/perf/Documentation/perf-trace.txt5
-rw-r--r--tools/perf/MANIFEST1
-rw-r--r--tools/perf/Makefile.config94
-rw-r--r--tools/perf/Makefile.perf150
-rw-r--r--tools/perf/arch/arm/annotate/instructions.c59
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c2
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c62
-rw-r--r--tools/perf/arch/powerpc/annotate/instructions.c58
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c78
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl7
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c10
-rw-r--r--tools/perf/bench/futex-hash.c10
-rw-r--r--tools/perf/bench/futex-lock-pi.c7
-rw-r--r--tools/perf/bench/futex-requeue.c2
-rw-r--r--tools/perf/bench/futex-wake-parallel.c4
-rw-r--r--tools/perf/bench/futex-wake.c3
-rw-r--r--tools/perf/bench/futex.h4
-rw-r--r--tools/perf/bench/mem-functions.c77
-rw-r--r--tools/perf/builtin-c2c.c2780
-rw-r--r--tools/perf/builtin-config.c137
-rw-r--r--tools/perf/builtin-kmem.c36
-rw-r--r--tools/perf/builtin-record.c11
-rw-r--r--tools/perf/builtin-report.c29
-rw-r--r--tools/perf/builtin-sched.c1118
-rw-r--r--tools/perf/builtin-script.c51
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/builtin-trace.c68
-rw-r--r--tools/perf/builtin.h1
-rw-r--r--tools/perf/jvmti/Build8
-rw-r--r--tools/perf/jvmti/Makefile89
-rw-r--r--tools/perf/jvmti/jvmti_agent.c38
-rw-r--r--tools/perf/jvmti/libjvmti.c39
-rw-r--r--tools/perf/perf.c1
-rw-r--r--tools/perf/pmu-events/arch/powerpc/mapfile.csv21
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/cache.json176
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/floating-point.json14
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/frontend.json470
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/marked.json794
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/memory.json212
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json4064
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/pipeline.json350
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/pmc.json140
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/translation.json176
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/cache.json746
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/floating-point.json261
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/frontend.json83
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/memory.json154
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/other.json450
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json364
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json124
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json3198
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json171
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json286
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json2845
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json1417
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json388
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json774
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json171
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/frontend.json286
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/memory.json433
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json1417
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json388
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json942
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json171
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/frontend.json286
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json649
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json1417
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json388
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json1127
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/frontend.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json34
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/other.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json433
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json75
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json1041
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json83
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/frontend.json294
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json655
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/other.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json1329
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json484
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json1077
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/floating-point.json83
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/frontend.json294
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json739
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/other.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json1329
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json484
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json1123
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json151
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/frontend.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/memory.json236
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json1307
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json180
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/cache.json1272
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/floating-point.json151
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/frontend.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/memory.json503
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/other.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/pipeline.json1307
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json198
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json1290
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/floating-point.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/frontend.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/memory.json422
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/other.json58
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json1220
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json149
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json2305
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/frontend.json34
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/memory.json1110
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json435
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json65
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv35
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/cache.json3229
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/memory.json739
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/other.json210
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json881
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/cache.json3184
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/memory.json739
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/other.json210
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json881
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json109
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json1879
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/memory.json445
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json58
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json1220
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json149
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json811
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/frontend.json47
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/memory.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/pipeline.json359
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json69
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/cache.json4299
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/floating-point.json68
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json472
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/memory.json2309
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/other.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json939
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json272
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json2817
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json758
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/other.json287
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json899
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json173
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json3233
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json739
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/other.json287
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json899
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json149
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/cache.json3225
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/floating-point.json229
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/memory.json747
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/other.json287
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/pipeline.json905
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json173
-rw-r--r--tools/perf/tests/Build2
-rw-r--r--tools/perf/tests/backward-ring-buffer.c2
-rw-r--r--tools/perf/tests/bpf.c8
-rw-r--r--tools/perf/tests/builtin-test.c105
-rw-r--r--tools/perf/tests/clang.c46
-rw-r--r--tools/perf/tests/llvm.c8
-rw-r--r--tools/perf/tests/llvm.h7
-rw-r--r--tools/perf/tests/make6
-rw-r--r--tools/perf/tests/perf-hooks.c48
-rw-r--r--tools/perf/tests/tests.h4
-rw-r--r--tools/perf/ui/browsers/annotate.c26
-rw-r--r--tools/perf/ui/browsers/hists.c27
-rw-r--r--tools/perf/ui/browsers/hists.h1
-rw-r--r--tools/perf/ui/gtk/annotate.c2
-rw-r--r--tools/perf/ui/helpline.c10
-rw-r--r--tools/perf/ui/helpline.h1
-rw-r--r--tools/perf/ui/stdio/hist.c35
-rw-r--r--tools/perf/util/Build7
-rw-r--r--tools/perf/util/annotate.c392
-rw-r--r--tools/perf/util/annotate.h23
-rw-r--r--tools/perf/util/bpf-loader.c33
-rw-r--r--tools/perf/util/c++/Build2
-rw-r--r--tools/perf/util/c++/clang-c.h43
-rw-r--r--tools/perf/util/c++/clang-test.cpp62
-rw-r--r--tools/perf/util/c++/clang.cpp195
-rw-r--r--tools/perf/util/c++/clang.h26
-rw-r--r--tools/perf/util/callchain.c232
-rw-r--r--tools/perf/util/callchain.h29
-rw-r--r--tools/perf/util/config.c20
-rw-r--r--tools/perf/util/config.h4
-rw-r--r--tools/perf/util/event.h3
-rw-r--r--tools/perf/util/evsel.c15
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/evsel_fprintf.c25
-rw-r--r--tools/perf/util/genelf.c113
-rw-r--r--tools/perf/util/genelf.h5
-rw-r--r--tools/perf/util/header.c19
-rw-r--r--tools/perf/util/hist.c1
-rw-r--r--tools/perf/util/hist.h1
-rw-r--r--tools/perf/util/intel-bts.c9
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c2
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h1
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c13
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h6
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.c4
-rw-r--r--tools/perf/util/intel-pt.c19
-rw-r--r--tools/perf/util/jitdump.c82
-rw-r--r--tools/perf/util/jitdump.h12
-rw-r--r--tools/perf/util/llvm-utils.c78
-rw-r--r--tools/perf/util/llvm-utils.h6
-rw-r--r--tools/perf/util/machine.c82
-rw-r--r--tools/perf/util/map.c17
-rw-r--r--tools/perf/util/mem-events.c136
-rw-r--r--tools/perf/util/mem-events.h38
-rw-r--r--tools/perf/util/parse-branch-options.c85
-rw-r--r--tools/perf/util/parse-branch-options.h3
-rw-r--r--tools/perf/util/parse-events.c15
-rw-r--r--tools/perf/util/perf-hooks-list.h3
-rw-r--r--tools/perf/util/perf-hooks.c88
-rw-r--r--tools/perf/util/perf-hooks.h39
-rw-r--r--tools/perf/util/pmu.c14
-rw-r--r--tools/perf/util/probe-event.h2
-rw-r--r--tools/perf/util/python-ext-sources1
-rw-r--r--tools/perf/util/quote.c2
-rw-r--r--tools/perf/util/session.c10
-rw-r--r--tools/perf/util/sort.c2
-rw-r--r--tools/perf/util/sort.h1
-rw-r--r--tools/perf/util/string.c21
-rw-r--r--tools/perf/util/symbol.c10
-rw-r--r--tools/perf/util/symbol.h11
-rw-r--r--tools/perf/util/symbol_fprintf.c11
-rw-r--r--tools/perf/util/time-utils.c119
-rw-r--r--tools/perf/util/time-utils.h14
-rw-r--r--tools/perf/util/trace-event-scripting.c39
-rw-r--r--tools/perf/util/unwind-libunwind-local.c4
-rw-r--r--tools/perf/util/util-cxx.h26
-rw-r--r--tools/perf/util/util.c88
-rw-r--r--tools/perf/util/util.h6
-rw-r--r--tools/perf/util/values.c81
-rw-r--r--tools/perf/util/values.h4
-rw-r--r--tools/testing/selftests/rcutorture/.gitignore2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh5
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c2
-rw-r--r--tools/testing/selftests/x86/Makefile2
-rw-r--r--tools/testing/selftests/x86/test_vdso.c123
-rw-r--r--virt/kvm/kvm_main.c20
735 files changed, 108493 insertions, 9356 deletions
diff --git a/CREDITS b/CREDITS
index d7ebdfbc4d4f..10a9eee807b6 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2775,6 +2775,10 @@ S: C/ Mieses 20, 9-B
S: Valladolid 47009
S: Spain
+N: Peter Oruba
+D: AMD Microcode loader driver
+S: Germany
+
N: Jens Osterkamp
E: jens@de.ibm.com
D: Maintainer of Spidernet network driver for Cell
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 498741737055..2a4a423d08e0 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -272,6 +272,22 @@ Description: Parameters for the CPU cache attributes
the modified cache line is written to main
memory only when it is replaced
+
+What: /sys/devices/system/cpu/cpu*/cache/index*/id
+Date: September 2016
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description: Cache id
+
+ The id provides a unique number for a specific instance of
+ a cache of a particular type. E.g. there may be a level
+ 3 unified cache on each socket in a server and we may
+ assign them ids 0, 1, 2, ...
+
+ Note that the id value can be non-contiguous. E.g. level 1
+ caches typically exist per core, but there may not be a
+ power of two cores on a socket, so these caches may be
+ numbered 0, 1, 2, 3, 4, 5, 8, 9, 10, ...
+
What: /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats
/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/turbo_stat
/sys/devices/system/cpu/cpuX/cpufreq/throttle_stats/sub_turbo_stat
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index a4d3838130e4..39bcb74ea733 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -547,7 +547,7 @@ The <tt>rcu_access_pointer()</tt> on line&nbsp;6 is similar to
It could reuse a value formerly fetched from this same pointer.
It could also fetch the pointer from <tt>gp</tt> in a byte-at-a-time
manner, resulting in <i>load tearing</i>, in turn resulting a bytewise
- mash-up of two distince pointer values.
+ mash-up of two distinct pointer values.
It might even use value-speculation optimizations, where it makes
a wrong guess, but by the time it gets around to checking the
value, an update has changed the pointer to match the wrong guess.
@@ -659,6 +659,29 @@ systems with more than one CPU:
In other words, a given instance of <tt>synchronize_rcu()</tt>
can avoid waiting on a given RCU read-side critical section only
if it can prove that <tt>synchronize_rcu()</tt> started first.
+
+ <p>
+ A related question is &ldquo;When <tt>rcu_read_lock()</tt>
+ doesn't generate any code, why does it matter how it relates
+ to a grace period?&rdquo;
+ The answer is that it is not the relationship of
+ <tt>rcu_read_lock()</tt> itself that is important, but rather
+ the relationship of the code within the enclosed RCU read-side
+ critical section to the code preceding and following the
+ grace period.
+ If we take this viewpoint, then a given RCU read-side critical
+ section begins before a given grace period when some access
+ preceding the grace period observes the effect of some access
+ within the critical section, in which case none of the accesses
+ within the critical section may observe the effects of any
+ access following the grace period.
+
+ <p>
+ As of late 2016, mathematical models of RCU take this
+ viewpoint, for example, see slides&nbsp;62 and&nbsp;63
+ of the
+ <a href="http://www2.rdrop.com/users/paulmck/scalability/paper/LinuxMM.2016.10.04c.LCE.pdf">2016 LinuxCon EU</a>
+ presentation.
</font></td></tr>
<tr><td>&nbsp;</td></tr>
</table>
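
To make the ordering argument in this hunk concrete, the update side of the classic RCU pattern looks roughly like the sketch below. This is an illustration only, not code from this merge: struct foo, gp and update_foo() are made-up names, and update-side serialization is assumed to be provided by the caller.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int a;
};

static struct foo __rcu *gp;

/*
 * Publish a new version and reclaim the old one.  synchronize_rcu()
 * returns only after every read-side critical section that began
 * before the grace period has completed, so the kfree() below cannot
 * pull memory out from under a pre-existing reader.
 */
static void update_foo(struct foo *newp)
{
        struct foo *oldp;

        oldp = rcu_dereference_protected(gp, 1); /* updates serialized by caller */
        rcu_assign_pointer(gp, newp);
        synchronize_rcu();
        kfree(oldp);
}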
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 204422719197..5cbd8b2395b8 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -237,7 +237,7 @@ rcu_dereference()
The reader uses rcu_dereference() to fetch an RCU-protected
pointer, which returns a value that may then be safely
- dereferenced. Note that rcu_deference() does not actually
+ dereferenced. Note that rcu_dereference() does not actually
dereference the pointer, instead, it protects the pointer for
later dereferencing. It also executes any needed memory-barrier
instructions for a given CPU architecture. Currently, only Alpha
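
A matching reader-side sketch (again an illustration only, reusing the made-up struct foo and gp from the sketch above) shows what "protects the pointer for later dereferencing" means in practice: rcu_dereference() fetches the pointer, and the actual dereference happens afterwards, inside the same critical section.

#include <linux/rcupdate.h>

static int read_foo_a(void)
{
        struct foo *p;
        int a = -1;

        rcu_read_lock();
        p = rcu_dereference(gp);        /* fetch the protected pointer */
        if (p)
                a = p->a;               /* the dereference itself */
        rcu_read_unlock();

        return a;
}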
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 7bf7e3c30579..f3dbc8cb67d1 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -863,6 +863,11 @@
dscc4.setup= [NET]
+ dump_apple_properties [X86]
+ Dump name and content of EFI device properties on
+ x86 Macs. Useful for driver authors to determine
+ what data is available or for reverse-engineering.
+
dyndbg[="val"] [KNL,DYNAMIC_DEBUG]
module.dyndbg[="val"]
Enable debug messages at boot time. See
@@ -875,12 +880,6 @@
nopku [X86] Disable Memory Protection Keys CPU feature found
in some Intel CPUs.
- eagerfpu= [X86]
- on enable eager fpu restore
- off disable eager fpu restore
- auto selects the default scheme, which automatically
- enables eagerfpu restore for xsaveopt.
-
module.async_probe [KNL]
Enable asynchronous probe on this module.
@@ -1768,9 +1767,6 @@
kmemcheck=2 (one-shot mode)
Default: 2 (one-shot mode)
- kstack=N [X86] Print N words from the kernel stack
- in oops dumps.
-
kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)
@@ -2570,6 +2566,10 @@
no-kvmapf [X86,KVM] Disable paravirtualized asynchronous page
fault handling.
+ no-vmw-sched-clock
+ [X86,PV_OPS] Disable paravirtualized VMware scheduler
+ clock and use the default one.
+
no-steal-acc [X86,KVM] Disable paravirtualized steal time accounting.
steal time is computed, but won't influence scheduler
behaviour
@@ -3639,12 +3639,6 @@
shapers= [NET]
Maximal number of shapers.
- show_msr= [x86] show boot-time MSR settings
- Format: { <integer> }
- Show boot-time (BIOS-initialized) MSR settings.
- The parameter means the number of CPUs to show,
- for example 1 means boot CPU only.
-
simeth= [IA-64]
simscsi=
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index ef5fbe9a77c7..ad440a2b8051 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -38,6 +38,11 @@ to deliver its interrupts via SPIs.
architecturally-defined reset values. Only supported for 32-bit
systems which follow the ARMv7 architected reset values.
+- arm,no-tick-in-suspend : The main counter does not tick when the system is in
+ low-power system suspend on some SoCs. This behavior does not match the
+ Architecture Reference Manual's specification that the system counter "must
+ be implemented in an always-on power domain."
+
Example:
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 069fcb3eef6e..262722d8867b 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -113,6 +113,34 @@ applicable everywhere (see syntax).
That will limit the usefulness but on the other hand avoid
the illegal configurations all over.
+- weak reverse dependencies: "imply" <symbol> ["if" <expr>]
+ This is similar to "select" as it enforces a lower limit on another
+ symbol except that the "implied" symbol's value may still be set to n
+ from a direct dependency or with a visible prompt.
+
+ Given the following example:
+
+ config FOO
+ tristate
+ imply BAZ
+
+ config BAZ
+ tristate
+ depends on BAR
+
+ The following values are possible:
+
+    FOO    BAR    BAZ's default    choice for BAZ
+    ---    ---    -------------    --------------
+    n      y      n                N/m/y
+    m      y      m                M/y/n
+    y      y      y                Y/n
+    y      n      *                N
+
+ This is useful e.g. with multiple drivers that want to indicate their
+ ability to hook into a secondary subsystem while allowing the user to
+ configure that subsystem out without also having to unset these drivers.
+
- limiting menu display: "visible if" <expr>
This attribute is only applicable to menu blocks, if the condition is
false, the menu block is not displayed to the user (the symbols
@@ -481,6 +509,7 @@ historical issues resolved through these different solutions.
b) Match dependency semantics:
b1) Swap all "select FOO" to "depends on FOO" or,
b2) Swap all "depends on FOO" to "select FOO"
+ c) Consider the use of "imply" instead of "select"
The resolution to a) can be tested with the sample Kconfig file
Documentation/kbuild/Kconfig.recursion-issue-01 through the removal
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 6bb78f872929..a32b4b748644 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -40,7 +40,6 @@ show up in /proc/sys/kernel:
- hung_task_warnings
- kexec_load_disabled
- kptr_restrict
-- kstack_depth_to_print [ X86 only ]
- l2cr [ PPC only ]
- modprobe ==> Documentation/debugging-modules.txt
- modules_disabled
@@ -395,13 +394,6 @@ When kptr_restrict is set to (2), kernel pointers printed using
==============================================================
-kstack_depth_to_print: (X86 only)
-
-Controls the number of words to print when dumping the raw
-kernel stack.
-
-==============================================================
-
l2cr: (PPC only)
This flag controls the L2 cache of G3 processor boards. If
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 185c39fea2a0..5596e2d71d6d 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -362,6 +362,26 @@ of ftrace. Here is a list of some of the key files:
to correlate events across hypervisor/guest if
tb_offset is known.
+ mono: This uses the fast monotonic clock (CLOCK_MONOTONIC)
+ which is monotonic and is subject to NTP rate adjustments.
+
+ mono_raw:
+ This is the raw monotonic clock (CLOCK_MONOTONIC_RAW)
+ which is monotonic but is not subject to any rate adjustments
+ and ticks at the same rate as the hardware clocksource.
+
+ boot: This is the boot clock (CLOCK_BOOTTIME) and is based on the
+ fast monotonic clock, but also accounts for time spent in
+ suspend. Since the clock access is designed for use in
+ tracing in the suspend path, some side effects are possible
+ if the clock is accessed after the suspend time is accounted but before
+ the fast mono clock is updated. In this case, the clock update
+ appears to happen slightly sooner than it normally would have.
+ Also on 32-bit systems, it's possible that the 64-bit boot offset
+ sees a partial update. These effects are rare and post
+ processing should be able to handle them. See comments in the
+ ktime_get_boot_fast_ns() function for more information.
+
To set a clock, simply echo the clock name into this file.
echo global > trace_clock
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 2a71c8f29f68..0a9ea515512a 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -208,7 +208,9 @@ MSR_KVM_STEAL_TIME: 0x4b564d03
__u64 steal;
__u32 version;
__u32 flags;
- __u32 pad[12];
+ __u8 preempted;
+ __u8 u8_pad[3];
+ __u32 pad[11];
}
whose data will be filled in by the hypervisor periodically. Only one
@@ -232,6 +234,11 @@ MSR_KVM_STEAL_TIME: 0x4b564d03
nanoseconds. Time during which the vcpu is idle, will not be
reported as steal time.
+ preempted: indicates whether the vCPU that owns this struct has been
+ preempted. Non-zero values mean the vCPU has been preempted. Zero
+ means the vCPU is not preempted. NOTE: it is always zero if the
+ hypervisor doesn't support this field.
+
MSR_KVM_EOI_EN: 0x4b564d04
data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
when disabled. Bit 1 is reserved and must be zero. When PV end of
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/intel_rdt_ui.txt
new file mode 100644
index 000000000000..d918d268cd72
--- /dev/null
+++ b/Documentation/x86/intel_rdt_ui.txt
@@ -0,0 +1,214 @@
+User Interface for Resource Allocation in Intel Resource Director Technology
+
+Copyright (C) 2016 Intel Corporation
+
+Fenghua Yu <fenghua.yu@intel.com>
+Tony Luck <tony.luck@intel.com>
+
+This feature is enabled by the CONFIG_INTEL_RDT_A Kconfig and the
+X86 /proc/cpuinfo flag bits "rdt", "cat_l3" and "cdp_l3".
+
+To use the feature mount the file system:
+
+ # mount -t resctrl resctrl [-o cdp] /sys/fs/resctrl
+
+mount options are:
+
+"cdp": Enable code/data prioritization in L3 cache allocations.
+
+
+Info directory
+--------------
+
+The 'info' directory contains information about the enabled
+resources. Each resource has its own subdirectory. The subdirectory
+names reflect the resource names. Each subdirectory contains the
+following files:
+
+"num_closids": The number of CLOSIDs which are valid for this
+ resource. The kernel uses the smallest number of
+ CLOSIDs of all enabled resources as the limit.
+
+"cbm_mask": The bitmask which is valid for this resource. This
+ mask is equivalent to 100%.
+
+"min_cbm_bits": The minimum number of consecutive bits which must be
+ set when writing a mask.
+
+
+Resource groups
+---------------
+Resource groups are represented as directories in the resctrl file
+system. The default group is the root directory. Other groups may be
+created as desired by the system administrator using the "mkdir(1)"
+command, and removed using "rmdir(1)".
+
+There are three files associated with each group:
+
+"tasks": A list of tasks that belongs to this group. Tasks can be
+ added to a group by writing the task ID to the "tasks" file
+ (which will automatically remove them from the previous
+ group to which they belonged). New tasks created by fork(2)
+ and clone(2) are added to the same group as their parent.
+ If a pid is not in any sub-partition, it is in the root partition
+ (i.e. the default partition).
+
+"cpus": A bitmask of logical CPUs assigned to this group. Writing
+ a new mask can add/remove CPUs from this group. Added CPUs
+ are removed from their previous group. Removed ones are
+ given to the default (root) group. You cannot remove CPUs
+ from the default group.
+
+"schemata": A list of all the resources available to this group.
+ Each resource has its own line and format - see below for
+ details.
+
+When a task is running, the following rules define which resources
+are available to it:
+
+1) If the task is a member of a non-default group, then the schemata
+for that group is used.
+
+2) Else if the task belongs to the default group, but is running on a
+CPU that is assigned to some specific group, then the schemata for
+the CPU's group is used.
+
+3) Otherwise the schemata for the default group is used.
+
+
+Schemata files - general concepts
+---------------------------------
+Each line in the file describes one resource. The line starts with
+the name of the resource, followed by specific values to be applied
+in each of the instances of that resource on the system.
+
+Cache IDs
+---------
+On current generation systems there is one L3 cache per socket and L2
+caches are generally just shared by the hyperthreads on a core, but this
+isn't an architectural requirement. We could have multiple separate L3
+caches on a socket, and multiple cores could share an L2 cache. So instead
+of using "socket" or "core" to define the set of logical CPUs sharing
+a resource we use a "Cache ID". At a given cache level this will be a
+unique number across the whole system (but it isn't guaranteed to be a
+contiguous sequence; there may be gaps). To find the ID for each logical
+CPU, look in /sys/devices/system/cpu/cpu*/cache/index*/id
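
As a small illustration (not part of the patch), a userspace program can enumerate those sysfs files and print the cache ID seen by each logical CPU and cache index; the sketch below assumes only the path documented above.

#include <glob.h>
#include <stdio.h>

int main(void)
{
        glob_t g;
        size_t i;

        /* One file per logical CPU and cache index, as documented above. */
        if (glob("/sys/devices/system/cpu/cpu*/cache/index*/id", 0, NULL, &g))
                return 1;

        for (i = 0; i < g.gl_pathc; i++) {
                FILE *f = fopen(g.gl_pathv[i], "r");
                int id;

                if (f && fscanf(f, "%d", &id) == 1)
                        printf("%s: %d\n", g.gl_pathv[i], id);
                if (f)
                        fclose(f);
        }
        globfree(&g);
        return 0;
}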
+
+Cache Bit Masks (CBM)
+---------------------
+For cache resources we describe the portion of the cache that is available
+for allocation using a bitmask. The maximum value of the mask is defined
+by each cpu model (and may be different for different cache levels). It
+is found using CPUID, but is also provided in the "info" directory of
+the resctrl file system in "info/{resource}/cbm_mask". X86 hardware
+requires that these masks have all the '1' bits in a contiguous block. So
+0x3, 0x6 and 0xC are legal 4-bit masks with two bits set, but 0x5, 0x9
+and 0xA are not. On a system with a 20-bit mask each bit represents 5%
+of the capacity of the cache. You could partition the cache into four
+equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000.
+
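The contiguity rule above can be checked with a few lines of C. The helper below is an illustrative sketch (cbm_is_contiguous() is a made-up name, not a kernel or resctrl API): it strips the trailing zero bits and then verifies that what remains has the form 2^n - 1.

#include <stdbool.h>
#include <stdio.h>

static bool cbm_is_contiguous(unsigned long cbm)
{
        if (!cbm)
                return false;

        cbm >>= __builtin_ctzl(cbm);    /* drop trailing zeros */
        return (cbm & (cbm + 1)) == 0;  /* contiguous iff 2^n - 1 remains */
}

int main(void)
{
        unsigned long tests[] = { 0x3, 0x6, 0xc, 0x5, 0x9, 0xa, 0xf8000 };
        unsigned int i;

        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
                printf("0x%lx -> %s\n", tests[i],
                       cbm_is_contiguous(tests[i]) ? "contiguous" : "not contiguous");
        return 0;
}
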
+
+L3 details (code and data prioritization disabled)
+--------------------------------------------------
+With CDP disabled the L3 schemata format is:
+
+ L3:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+L3 details (CDP enabled via mount option to resctrl)
+----------------------------------------------------
+When CDP is enabled L3 control is split into two separate resources
+so you can specify independent masks for code and data like this:
+
+ L3data:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+ L3code:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+L2 details
+----------
+L2 cache does not support code and data prioritization, so the
+schemata format is always:
+
+ L2:<cache_id0>=<cbm>;<cache_id1>=<cbm>;...
+
+Example 1
+---------
+On a two socket machine (one L3 cache per socket) with just four bits
+for cache bit masks
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+# mkdir p0 p1
+# echo "L3:0=3;1=c" > /sys/fs/resctrl/p0/schemata
+# echo "L3:0=3;1=3" > /sys/fs/resctrl/p1/schemata
+
+The default resource group is unmodified, so we have access to all parts
+of all caches (its schemata file reads "L3:0=f;1=f").
+
+Tasks that are under the control of group "p0" may only allocate from the
+"lower" 50% on cache ID 0, and the "upper" 50% of cache ID 1.
+Tasks in group "p1" use the "lower" 50% of cache on both sockets.
+
+Example 2
+---------
+Again two sockets, but this time with a more realistic 20-bit mask.
+
+Two real-time tasks, pid=1234 running on processor 0 and pid=5678 running on
+processor 1 of socket 0, on a two-socket, dual-core machine. To avoid noisy
+neighbors, each of the two real-time tasks exclusively occupies one quarter
+of the L3 cache on socket 0.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+
+First we reset the schemata for the default group so that the "upper"
+50% of the L3 cache on socket 0 cannot be used by ordinary tasks:
+
+# echo "L3:0=3ff;1=fffff" > schemata
+
+Next we make a resource group for our first real time task and give
+it access to the "top" 25% of the cache on socket 0.
+
+# mkdir p0
+# echo "L3:0=f8000;1=fffff" > p0/schemata
+
+Finally we move our first real time task into this resource group. We
+also use taskset(1) to ensure the task always runs on a dedicated CPU
+on socket 0. Most uses of resource groups will also constrain which
+processors tasks run on.
+
+# echo 1234 > p0/tasks
+# taskset -cp 1 1234
+
+Ditto for the second real time task (with the remaining 25% of cache):
+
+# mkdir p1
+# echo "L3:0=7c00;1=fffff" > p1/schemata
+# echo 5678 > p1/tasks
+# taskset -cp 2 5678
+
+Example 3
+---------
+
+A single-socket system which has real-time tasks running on cores 4-7 and a
+non-real-time workload assigned to cores 0-3. The real-time tasks share text
+and data, so a per-task association is not required; due to interaction
+with the kernel it is desirable that the kernel running on these cores shares
+L3 with the tasks.
+
+# mount -t resctrl resctrl /sys/fs/resctrl
+# cd /sys/fs/resctrl
+
+First we reset the schemata for the default group so that the "upper"
+50% of the L3 cache on socket 0 cannot be used by ordinary tasks:
+
+# echo "L3:0=3ff" > schemata
+
+Next we make a resource group for our real time cores and give
+it access to the "top" 50% of the cache on socket 0.
+
+# mkdir p0
+# echo "L3:0=ffc00;" > p0/schemata
+
+Finally we move cores 4-7 over to the new group and make sure that the
+kernel and the tasks running there get 50% of the cache. The bitmask for
+CPUs 4-7 is 0xf0:
+
+# echo f0 > p0/cpus
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index 0965a71f9942..61b611e9eeaf 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -277,10 +277,6 @@ IOMMU (input/output memory management unit)
space might stop working. Use this option if you have devices that
are accessed from userspace directly on some PCI host bridge.
-Debugging
-
- kstack=N Print N words from the kernel stack in oops dumps.
-
Miscellaneous
nogbpages
diff --git a/MAINTAINERS b/MAINTAINERS
index cf4afada7064..48fa6ea4986d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4704,12 +4704,14 @@ L: linux-efi@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
S: Maintained
F: Documentation/efi-stub.txt
-F: arch/ia64/kernel/efi.c
+F: arch/*/kernel/efi.c
F: arch/x86/boot/compressed/eboot.[ch]
-F: arch/x86/include/asm/efi.h
+F: arch/*/include/asm/efi.h
F: arch/x86/platform/efi/
F: drivers/firmware/efi/
F: include/linux/efi*.h
+F: arch/arm/boot/compressed/efi-header.S
+F: arch/arm64/kernel/efi-entry.S
EFI VARIABLE FILESYSTEM
M: Matthew Garrett <matthew.garrett@nebula.com>
@@ -6180,12 +6182,6 @@ S: Maintained
F: Documentation/cdrom/ide-cd
F: drivers/ide/ide-cd*
-IDLE-I7300
-M: Andy Henroid <andrew.d.henroid@intel.com>
-L: linux-pm@vger.kernel.org
-S: Supported
-F: drivers/idle/i7300_idle.c
-
IEEE 802.15.4 SUBSYSTEM
M: Alexander Aring <aar@pengutronix.de>
M: Stefan Schmidt <stefan@osg.samsung.com>
@@ -10272,6 +10268,14 @@ L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/sw/rdmavt
+RDT - RESOURCE ALLOCATION
+M: Fenghua Yu <fenghua.yu@intel.com>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: arch/x86/kernel/cpu/intel_rdt*
+F: arch/x86/include/asm/intel_rdt*
+F: Documentation/x86/intel_rdt*
+
READ-COPY UPDATE (RCU)
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
diff --git a/arch/Kconfig b/arch/Kconfig
index 659bdd079277..abab6590f08f 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -512,6 +512,9 @@ config HAVE_CONTEXT_TRACKING
config HAVE_VIRT_CPU_ACCOUNTING
bool
+config ARCH_HAS_SCALED_CPUTIME
+ bool
+
config HAVE_VIRT_CPU_ACCOUNTING_GEN
bool
default y if 64BIT
diff --git a/arch/alpha/include/asm/mutex.h b/arch/alpha/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/alpha/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index 43a7559c448b..2fec2dee3020 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -58,7 +58,6 @@ unsigned long get_wchan(struct task_struct *p);
((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index ffb93f499c83..56e427c7aa3c 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1029,11 +1029,16 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
+asmlinkage long sys_ni_posix_timers(void);
+
SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it)
{
struct itimerval kit;
int error;
+ if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
+ return sys_ni_posix_timers();
+
error = do_getitimer(which, &kit);
if (!error && put_it32(it, &kit))
error = -EFAULT;
@@ -1047,6 +1052,9 @@ SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in,
struct itimerval kin, kout;
int error;
+ if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
+ return sys_ni_posix_timers();
+
if (in) {
if (get_it32(&kin, in))
return -EFAULT;
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
deleted file mode 100644
index a2f88ff9f506..000000000000
--- a/arch/arc/include/asm/mutex.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
- * atomic dec based which can "count" any number of lock contenders.
- * This ideally needs to be fixed in core, but for now switching to dec ver.
- */
-#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
-#include <asm-generic/mutex-dec.h>
-#else
-#include <asm-generic/mutex-xchg.h>
-#endif
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 16b630fbeb6a..6e1242da0159 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -60,15 +60,12 @@ struct task_struct;
#ifndef CONFIG_EZNPS_MTM_EXT
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#else
#define cpu_relax() \
__asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
-#define cpu_relax_lowlatency() barrier()
-
#endif
#define copy_segments(tsk, mm) do { } while (0)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1bbff5feb1da..186c4c214e0a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -703,6 +703,7 @@ config ARCH_VIRT
select ARM_GIC
select ARM_GIC_V2M if PCI
select ARM_GIC_V3
+ select ARM_GIC_V3_ITS if PCI
select ARM_PSCI
select HAVE_ARM_ARCH_TIMER
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 37dc0fe1093f..46730017b3c5 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -757,19 +757,18 @@ EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
* while the switcher is active.
* We're just not ready to deal with that given the trickery involved.
*/
-static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int bL_switcher_cpu_pre(unsigned int cpu)
{
- if (bL_switcher_active) {
- int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
- switch (action & 0xf) {
- case CPU_UP_PREPARE:
- case CPU_DOWN_PREPARE:
- if (pairing == -1)
- return NOTIFY_BAD;
- }
- }
- return NOTIFY_DONE;
+ int pairing;
+
+ if (!bL_switcher_active)
+ return 0;
+
+ pairing = bL_switcher_cpu_pairing[cpu];
+
+ if (pairing == -1)
+ return -EINVAL;
+ return 0;
}
static bool no_bL_switcher;
@@ -782,8 +781,15 @@ static int __init bL_switcher_init(void)
if (!mcpm_is_available())
return -ENODEV;
- cpu_notifier(bL_switcher_hotplug_callback, 0);
-
+ cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
+ bL_switcher_cpu_pre, NULL);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
+ NULL, bL_switcher_cpu_pre);
+ if (ret < 0) {
+ cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
+ pr_err("bL_switcher: Failed to allocate a hotplug state\n");
+ return ret;
+ }
if (!no_bL_switcher) {
ret = bL_switcher_enable();
if (ret)
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index a8088290b778..27475904e096 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,6 +22,7 @@
#include <linux/io.h>
#include <asm/barrier.h>
+#include <asm/cacheflush.h>
#include <asm/cp15.h>
#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1)
@@ -230,19 +231,14 @@ static inline void gic_write_bpr1(u32 val)
* AArch32, since the syndrome register doesn't provide any information for
* them.
* Consequently, the following IO helpers use 32bit accesses.
- *
- * There are only two registers that need 64bit accesses in this driver:
- * - GICD_IROUTERn, contain the affinity values associated to each interrupt.
- * The upper-word (aff3) will always be 0, so there is no need for a lock.
- * - GICR_TYPER is an ID register and doesn't need atomicity.
*/
-static inline void gic_write_irouter(u64 val, volatile void __iomem *addr)
+static inline void __gic_writeq_nonatomic(u64 val, volatile void __iomem *addr)
{
writel_relaxed((u32)val, addr);
writel_relaxed((u32)(val >> 32), addr + 4);
}
-static inline u64 gic_read_typer(const volatile void __iomem *addr)
+static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
{
u64 val;
@@ -251,5 +247,49 @@ static inline u64 gic_read_typer(const volatile void __iomem *addr)
return val;
}
+#define gic_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
+
+/*
+ * GICD_IROUTERn, contain the affinity values associated to each interrupt.
+ * The upper-word (aff3) will always be 0, so there is no need for a lock.
+ */
+#define gic_write_irouter(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gic_read_typer(c) __gic_readq_nonatomic(c)
+
+/*
+ * GITS_BASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_baser(c) __gic_readq_nonatomic(c)
+#define gits_write_baser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_PENDBASER and GICR_PROPBASER are changed with LPIs disabled, so they
+ * won't be being used during any updates and can be changed non-atomically
+ */
+#define gicr_read_propbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_propbaser(v, c) __gic_writeq_nonatomic(v, c)
+#define gicr_read_pendbaser(c) __gic_readq_nonatomic(c)
+#define gicr_write_pendbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gits_read_typer(c) __gic_readq_nonatomic(c)
+
+/*
+ * GITS_CBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_cbaser(c) __gic_readq_nonatomic(c)
+#define gits_write_cbaser(v, c) __gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_CWRITER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c)
+
#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 766bf9b78160..0b06f5341b45 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -57,6 +57,9 @@ void efi_virtmap_unload(void);
#define __efi_call_early(f, ...) f(__VA_ARGS__)
#define efi_is_64bit() (false)
+#define efi_call_proto(protocol, f, instance, ...) \
+ ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+
struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg);
void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si);
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
deleted file mode 100644
index 87c044910fe0..000000000000
--- a/arch/arm/include/asm/mutex.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/include/asm/mutex.h
- *
- * ARM optimized mutex locking primitives
- *
- * Please look into asm-generic/mutex-xchg.h for a formal definition.
- */
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
-/*
- * On pre-ARMv6 hardware this results in a swp-based implementation,
- * which is the most efficient. For ARMv6+, we have exclusive memory
- * accessors and use atomic_dec to avoid the extra xchg operations
- * on the locking slowpaths.
- */
-#if __LINUX_ARM_ARCH__ < 6
-#include <asm-generic/mutex-xchg.h>
-#else
-#include <asm-generic/mutex-dec.h>
-#endif
-#endif /* _ASM_MUTEX_H */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 8a1e8e995dae..c3d5fc124a05 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -82,8 +82,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
#endif
-#define cpu_relax_lowlatency() cpu_relax()
-
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index b8df45883cf7..188180b5523d 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -925,9 +925,9 @@ static bool core_has_os_save_restore(void)
}
}
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(unsigned int cpu)
{
- int i, raw_num_brps, err = 0, cpu = smp_processor_id();
+ int i, raw_num_brps, err = 0;
u32 val;
/*
@@ -1020,25 +1020,20 @@ out_mdbgen:
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
}
-static int dbg_reset_notify(struct notifier_block *self,
- unsigned long action, void *cpu)
+static int dbg_reset_online(unsigned int cpu)
{
- if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
- smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
-
- return NOTIFY_OK;
+ local_irq_disable();
+ reset_ctrl_regs(cpu);
+ local_irq_enable();
+ return 0;
}
-static struct notifier_block dbg_reset_nb = {
- .notifier_call = dbg_reset_notify,
-};
-
#ifdef CONFIG_CPU_PM
static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
void *v)
{
if (action == CPU_PM_EXIT)
- reset_ctrl_regs(NULL);
+ reset_ctrl_regs(smp_processor_id());
return NOTIFY_OK;
}
@@ -1059,6 +1054,8 @@ static inline void pm_init(void)
static int __init arch_hw_breakpoint_init(void)
{
+ int ret;
+
debug_arch = get_debug_arch();
if (!debug_arch_supported()) {
@@ -1072,25 +1069,28 @@ static int __init arch_hw_breakpoint_init(void)
core_num_brps = get_num_brps();
core_num_wrps = get_num_wrps();
- cpu_notifier_register_begin();
-
/*
* We need to tread carefully here because DBGSWENABLE may be
* driven low on this core and there isn't an architected way to
* determine that.
*/
+ get_online_cpus();
register_undef_hook(&debug_reg_hook);
/*
- * Reset the breakpoint resources. We assume that a halting
- * debugger will leave the world in a nice state for us.
+ * Register CPU notifier which resets the breakpoint resources. We
+ * assume that a halting debugger will leave the world in a nice state
+ * for us.
*/
- on_each_cpu(reset_ctrl_regs, NULL, 1);
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online",
+ dbg_reset_online, NULL);
unregister_undef_hook(&debug_reg_hook);
- if (!cpumask_empty(&debug_err_mask)) {
+ if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
core_num_brps = 0;
core_num_wrps = 0;
- cpu_notifier_register_done();
+ if (ret > 0)
+ cpuhp_remove_state_nocalls(ret);
+ put_online_cpus();
return 0;
}
@@ -1108,12 +1108,9 @@ static int __init arch_hw_breakpoint_init(void)
TRAP_HWBKPT, "watchpoint debug exception");
hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
TRAP_HWBKPT, "breakpoint debug exception");
+ put_online_cpus();
- /* Register hotplug and PM notifiers. */
- __register_cpu_notifier(&dbg_reset_nb);
-
- cpu_notifier_register_done();
-
+ /* Register PM notifiers. */
pm_init();
return 0;
}
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 66a11d1a6eac..c928015d39a2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -174,6 +174,7 @@
<GIC_PPI 14 IRQ_TYPE_LEVEL_LOW 0>,
<GIC_PPI 11 IRQ_TYPE_LEVEL_LOW 0>,
<GIC_PPI 10 IRQ_TYPE_LEVEL_LOW 0>;
+ arm,no-tick-in-suspend;
};
xin24m: xin24m {
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 28196b18e394..8365a84c2640 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -23,7 +23,6 @@ generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
generic-y += msi.h
-generic-y += mutex.h
generic-y += poll.h
generic-y += preempt.h
generic-y += resource.h
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index f8ae6d6e4767..f37e3a21f6e7 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -79,19 +79,10 @@
#include <linux/stringify.h>
#include <asm/barrier.h>
+#include <asm/cacheflush.h>
-#define read_gicreg(r) \
- ({ \
- u64 reg; \
- asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
- reg; \
- })
-
-#define write_gicreg(v,r) \
- do { \
- u64 __val = (v); \
- asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
- } while (0)
+#define read_gicreg read_sysreg_s
+#define write_gicreg write_sysreg_s
/*
* Low-level accessors
@@ -102,13 +93,13 @@
static inline void gic_write_eoir(u32 irq)
{
- asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" ((u64)irq));
+ write_sysreg_s(irq, ICC_EOIR1_EL1);
isb();
}
static inline void gic_write_dir(u32 irq)
{
- asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" ((u64)irq));
+ write_sysreg_s(irq, ICC_DIR_EL1);
isb();
}
@@ -116,7 +107,7 @@ static inline u64 gic_read_iar_common(void)
{
u64 irqstat;
- asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+ irqstat = read_sysreg_s(ICC_IAR1_EL1);
dsb(sy);
return irqstat;
}
@@ -132,12 +123,9 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
{
u64 irqstat;
- asm volatile(
- "nop;nop;nop;nop\n\t"
- "nop;nop;nop;nop\n\t"
- "mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"
- "nop;nop;nop;nop"
- : "=r" (irqstat));
+ nops(8);
+ irqstat = read_sysreg_s(ICC_IAR1_EL1);
+ nops(4);
mb();
return irqstat;
@@ -145,37 +133,34 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
static inline void gic_write_pmr(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
+ write_sysreg_s(val, ICC_PMR_EL1);
}
static inline void gic_write_ctlr(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" ((u64)val));
+ write_sysreg_s(val, ICC_CTLR_EL1);
isb();
}
static inline void gic_write_grpen1(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" ((u64)val));
+ write_sysreg_s(val, ICC_GRPEN1_EL1);
isb();
}
static inline void gic_write_sgi1r(u64 val)
{
- asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+ write_sysreg_s(val, ICC_SGI1R_EL1);
}
static inline u32 gic_read_sre(void)
{
- u64 val;
-
- asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
- return val;
+ return read_sysreg_s(ICC_SRE_EL1);
}
static inline void gic_write_sre(u32 val)
{
- asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((u64)val));
+ write_sysreg_s(val, ICC_SRE_EL1);
isb();
}
@@ -187,5 +172,21 @@ static inline void gic_write_bpr1(u32 val)
#define gic_read_typer(c) readq_relaxed(c)
#define gic_write_irouter(v, c) writeq_relaxed(v, c)
+#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+
+#define gits_read_baser(c) readq_relaxed(c)
+#define gits_write_baser(v, c) writeq_relaxed(v, c)
+
+#define gits_read_cbaser(c) readq_relaxed(c)
+#define gits_write_cbaser(v, c) writeq_relaxed(v, c)
+
+#define gits_write_cwriter(v, c) writeq_relaxed(v, c)
+
+#define gicr_read_propbaser(c) readq_relaxed(c)
+#define gicr_write_propbaser(v, c) writeq_relaxed(v, c)
+
+#define gicr_write_pendbaser(v, c) writeq_relaxed(v, c)
+#define gicr_read_pendbaser(c) readq_relaxed(c)
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 3a405dccb6cf..0b6b1633017f 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -52,6 +52,9 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define __efi_call_early(f, ...) f(__VA_ARGS__)
#define efi_is_64bit() (true)
+#define efi_call_proto(protocol, f, instance, ...) \
+ ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+
#define alloc_screen_info(x...) &screen_info
#define free_screen_info(x...)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 60e34824e18c..747c65a616ed 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -149,8 +149,6 @@ static inline void cpu_relax(void)
asm volatile("yield" ::: "memory");
}
-#define cpu_relax_lowlatency() cpu_relax()
-
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index b3d5b3e8fbcb..7b7be71e87bf 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -227,7 +227,7 @@ static struct attribute_group cpuregs_attr_group = {
.name = "identification"
};
-static int cpuid_add_regs(int cpu)
+static int cpuid_cpu_online(unsigned int cpu)
{
int rc;
struct device *dev;
@@ -248,7 +248,7 @@ out:
return rc;
}
-static int cpuid_remove_regs(int cpu)
+static int cpuid_cpu_offline(unsigned int cpu)
{
struct device *dev;
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
@@ -264,40 +264,22 @@ static int cpuid_remove_regs(int cpu)
return 0;
}
-static int cpuid_callback(struct notifier_block *nb,
- unsigned long action, void *hcpu)
-{
- int rc = 0;
- unsigned long cpu = (unsigned long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- rc = cpuid_add_regs(cpu);
- break;
- case CPU_DEAD:
- rc = cpuid_remove_regs(cpu);
- break;
- }
-
- return notifier_from_errno(rc);
-}
-
static int __init cpuinfo_regs_init(void)
{
- int cpu;
-
- cpu_notifier_register_begin();
+ int cpu, ret;
for_each_possible_cpu(cpu) {
struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
kobject_init(&info->kobj, &cpuregs_kobj_type);
- if (cpu_online(cpu))
- cpuid_add_regs(cpu);
}
- __hotcpu_notifier(cpuid_callback, 0);
- cpu_notifier_register_done();
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm64/cpuinfo:online",
+ cpuid_cpu_online, cpuid_cpu_offline);
+ if (ret < 0) {
+ pr_err("cpuinfo: failed to register hotplug callbacks.\n");
+ return ret;
+ }
return 0;
}
static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
diff --git a/arch/avr32/include/asm/mutex.h b/arch/avr32/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/avr32/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/avr32/include/asm/processor.h b/arch/avr32/include/asm/processor.h
index 941593c7d9f3..972adcc1e8f4 100644
--- a/arch/avr32/include/asm/processor.h
+++ b/arch/avr32/include/asm/processor.h
@@ -92,7 +92,6 @@ extern struct avr32_cpuinfo boot_cpu_data;
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
struct cpu_context {
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 91d49c0a3118..2fb67b59d188 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -24,7 +24,6 @@ generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mman.h
generic-y += msgbuf.h
-generic-y += mutex.h
generic-y += param.h
generic-y += percpu.h
generic-y += pgalloc.h
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index 0c265aba94ad..85d4af97c986 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -92,7 +92,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#define cpu_relax() smp_mb()
-#define cpu_relax_lowlatency() cpu_relax()
/* Get the Silicon Revision of the chip */
static inline uint32_t __pure bfin_revid(void)
diff --git a/arch/c6x/include/asm/mutex.h b/arch/c6x/include/asm/mutex.h
deleted file mode 100644
index 7a7248e0462d..000000000000
--- a/arch/c6x/include/asm/mutex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_C6X_MUTEX_H
-#define _ASM_C6X_MUTEX_H
-
-#include <asm-generic/mutex-null.h>
-
-#endif /* _ASM_C6X_MUTEX_H */
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h
index f2ef31be2f8b..b9eb3da7f278 100644
--- a/arch/c6x/include/asm/processor.h
+++ b/arch/c6x/include/asm/processor.h
@@ -121,7 +121,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
#define cpu_relax() do { } while (0)
-#define cpu_relax_lowlatency() cpu_relax()
extern const struct seq_operations cpuinfo_op;
diff --git a/arch/cris/include/asm/mutex.h b/arch/cris/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/cris/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h
index 862126b58116..15b815df29c1 100644
--- a/arch/cris/include/asm/processor.h
+++ b/arch/cris/include/asm/processor.h
@@ -63,7 +63,6 @@ static inline void release_thread(struct task_struct *dead_task)
#define init_stack (init_thread_union.stack)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
void default_idle(void);
diff --git a/arch/frv/include/asm/mutex.h b/arch/frv/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/frv/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index 73f0a79ad8e6..ddaeb9cc9143 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -107,7 +107,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/* data cache prefetch */
#define ARCH_HAS_PREFETCH
diff --git a/arch/h8300/include/asm/mutex.h b/arch/h8300/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/h8300/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
index 111df7397ac7..65132d7ae9e5 100644
--- a/arch/h8300/include/asm/processor.h
+++ b/arch/h8300/include/asm/processor.h
@@ -127,7 +127,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define HARD_RESET_NOW() ({ \
local_irq_disable(); \
diff --git a/arch/hexagon/include/asm/mutex.h b/arch/hexagon/include/asm/mutex.h
deleted file mode 100644
index 58b52de1bc22..000000000000
--- a/arch/hexagon/include/asm/mutex.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-#include <asm-generic/mutex-xchg.h>
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h
index d8501137c8d0..45a825402f63 100644
--- a/arch/hexagon/include/asm/processor.h
+++ b/arch/hexagon/include/asm/processor.h
@@ -56,7 +56,6 @@ struct thread_struct {
}
#define cpu_relax() __vmyield()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* Decides where the kernel will search for a free chunk of vm space during
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
deleted file mode 100644
index 28cb819e0ff9..000000000000
--- a/arch/ia64/include/asm/mutex.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * ia64 implementation of the mutex fastpath.
- *
- * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
- *
- */
-
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function MUST leave the value lower than
- * 1 even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
- fail_fn(count);
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
- return -1;
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the count from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than 1.
- *
- * If the implementation sets it to a value of lower than 1, then the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int ret = ia64_fetchadd4_rel(count, 1);
- if (unlikely(ret < 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- *
- * If the architecture has no effective trylock variant, it should call the
- * <fail_fn> spinlock-based trylock variant unconditionally.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- if (atomic_read(count) == 1 && cmpxchg_acq(count, 1, 0) == 1)
- return 1;
- return 0;
-}
-
-#endif
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index ce53c50d0ba4..03911a336406 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -547,7 +547,6 @@ ia64_eoi (void)
}
#define cpu_relax() ia64_hint(ia64_hint_pause)
-#define cpu_relax_lowlatency() cpu_relax()
static inline int
ia64_get_irr(unsigned int vector)
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 5ed0ea92c5bf..85bba43e7d5d 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -224,85 +224,45 @@ static struct attribute_group err_inject_attr_group = {
.name = "err_inject"
};
/* Add/Remove err_inject interface for CPU device */
-static int err_inject_add_dev(struct device *sys_dev)
+static int err_inject_add_dev(unsigned int cpu)
{
+ struct device *sys_dev = get_cpu_device(cpu);
+
return sysfs_create_group(&sys_dev->kobj, &err_inject_attr_group);
}
-static int err_inject_remove_dev(struct device *sys_dev)
+static int err_inject_remove_dev(unsigned int cpu)
{
+ struct device *sys_dev = get_cpu_device(cpu);
+
sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
return 0;
}
-static int err_inject_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct device *sys_dev;
-
- sys_dev = get_cpu_device(cpu);
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- err_inject_add_dev(sys_dev);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- err_inject_remove_dev(sys_dev);
- break;
- }
-
- return NOTIFY_OK;
-}
-static struct notifier_block err_inject_cpu_notifier =
-{
- .notifier_call = err_inject_cpu_callback,
-};
+static enum cpuhp_state hp_online;
-static int __init
-err_inject_init(void)
+static int __init err_inject_init(void)
{
- int i;
-
+ int ret;
#ifdef ERR_INJ_DEBUG
printk(KERN_INFO "Enter error injection driver.\n");
#endif
- cpu_notifier_register_begin();
-
- for_each_online_cpu(i) {
- err_inject_cpu_callback(&err_inject_cpu_notifier, CPU_ONLINE,
- (void *)(long)i);
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/err_inj:online",
+ err_inject_add_dev, err_inject_remove_dev);
+ if (ret >= 0) {
+ hp_online = ret;
+ ret = 0;
}
-
- __register_hotcpu_notifier(&err_inject_cpu_notifier);
-
- cpu_notifier_register_done();
-
- return 0;
+ return ret;
}
-static void __exit
-err_inject_exit(void)
+static void __exit err_inject_exit(void)
{
- int i;
- struct device *sys_dev;
-
#ifdef ERR_INJ_DEBUG
printk(KERN_INFO "Exit error injection driver.\n");
#endif
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(i) {
- sys_dev = get_cpu_device(i);
- sysfs_remove_group(&sys_dev->kobj, &err_inject_attr_group);
- }
-
- __unregister_hotcpu_notifier(&err_inject_cpu_notifier);
-
- cpu_notifier_register_done();
+ cpuhp_remove_state(hp_online);
}
module_init(err_inject_init);
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index c39c3cd3ac34..b6e597860888 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -932,8 +932,7 @@ static const struct file_operations proc_palinfo_fops = {
.release = single_release,
};
-static void
-create_palinfo_proc_entries(unsigned int cpu)
+static int palinfo_add_proc(unsigned int cpu)
{
pal_func_cpu_u_t f;
struct proc_dir_entry *cpu_dir;
@@ -943,7 +942,7 @@ create_palinfo_proc_entries(unsigned int cpu)
cpu_dir = proc_mkdir(cpustr, palinfo_dir);
if (!cpu_dir)
- return;
+ return -EINVAL;
f.req_cpu = cpu;
@@ -952,42 +951,21 @@ create_palinfo_proc_entries(unsigned int cpu)
proc_create_data(palinfo_entries[j].name, 0, cpu_dir,
&proc_palinfo_fops, (void *)f.value);
}
+ return 0;
}
-static void
-remove_palinfo_proc_entries(unsigned int hcpu)
+static int palinfo_del_proc(unsigned int hcpu)
{
char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
+
sprintf(cpustr, "cpu%d", hcpu);
remove_proc_subtree(cpustr, palinfo_dir);
+ return 0;
}
-static int palinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int hotcpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- create_palinfo_proc_entries(hotcpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- remove_palinfo_proc_entries(hotcpu);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __refdata palinfo_cpu_notifier =
-{
- .notifier_call = palinfo_cpu_callback,
- .priority = 0,
-};
+static enum cpuhp_state hp_online;
-static int __init
-palinfo_init(void)
+static int __init palinfo_init(void)
{
int i = 0;
@@ -996,25 +974,19 @@ palinfo_init(void)
if (!palinfo_dir)
return -ENOMEM;
- cpu_notifier_register_begin();
-
- /* Create palinfo dirs in /proc for all online cpus */
- for_each_online_cpu(i) {
- create_palinfo_proc_entries(i);
+ i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/palinfo:online",
+ palinfo_add_proc, palinfo_del_proc);
+ if (i < 0) {
+ remove_proc_subtree("pal", NULL);
+ return i;
}
-
- /* Register for future delivery via notify registration */
- __register_hotcpu_notifier(&palinfo_cpu_notifier);
-
- cpu_notifier_register_done();
-
+ hp_online = i;
return 0;
}
-static void __exit
-palinfo_exit(void)
+static void __exit palinfo_exit(void)
{
- unregister_hotcpu_notifier(&palinfo_cpu_notifier);
+ cpuhp_remove_state(hp_online);
remove_proc_subtree("pal", NULL);
}
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 5313007d5423..aaf74f36cfa1 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -550,52 +550,40 @@ static const struct file_operations salinfo_data_fops = {
.llseek = default_llseek,
};
-static int
-salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+static int salinfo_cpu_online(unsigned int cpu)
{
- unsigned int i, cpu = (unsigned long)hcpu;
- unsigned long flags;
+ unsigned int i, end = ARRAY_SIZE(salinfo_data);
struct salinfo_data *data;
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- spin_lock_irqsave(&data_saved_lock, flags);
- for (i = 0, data = salinfo_data;
- i < ARRAY_SIZE(salinfo_data);
- ++i, ++data) {
- cpumask_set_cpu(cpu, &data->cpu_event);
- wake_up_interruptible(&data->read_wait);
- }
- spin_unlock_irqrestore(&data_saved_lock, flags);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- spin_lock_irqsave(&data_saved_lock, flags);
- for (i = 0, data = salinfo_data;
- i < ARRAY_SIZE(salinfo_data);
- ++i, ++data) {
- struct salinfo_data_saved *data_saved;
- int j;
- for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j;
- j >= 0;
- --j, --data_saved) {
- if (data_saved->buffer && data_saved->cpu == cpu) {
- shift1_data_saved(data, j);
- }
- }
- cpumask_clear_cpu(cpu, &data->cpu_event);
- }
- spin_unlock_irqrestore(&data_saved_lock, flags);
- break;
+
+ spin_lock_irq(&data_saved_lock);
+ for (i = 0, data = salinfo_data; i < end; ++i, ++data) {
+ cpumask_set_cpu(cpu, &data->cpu_event);
+ wake_up_interruptible(&data->read_wait);
}
- return NOTIFY_OK;
+ spin_unlock_irq(&data_saved_lock);
+ return 0;
}
-static struct notifier_block salinfo_cpu_notifier =
+static int salinfo_cpu_pre_down(unsigned int cpu)
{
- .notifier_call = salinfo_cpu_callback,
- .priority = 0,
-};
+ unsigned int i, end = ARRAY_SIZE(salinfo_data);
+ struct salinfo_data *data;
+
+ spin_lock_irq(&data_saved_lock);
+ for (i = 0, data = salinfo_data; i < end; ++i, ++data) {
+ struct salinfo_data_saved *data_saved;
+ int j = ARRAY_SIZE(data->data_saved) - 1;
+
+ for (data_saved = data->data_saved + j; j >= 0;
+ --j, --data_saved) {
+ if (data_saved->buffer && data_saved->cpu == cpu)
+ shift1_data_saved(data, j);
+ }
+ cpumask_clear_cpu(cpu, &data->cpu_event);
+ }
+ spin_unlock_irq(&data_saved_lock);
+ return 0;
+}
static int __init
salinfo_init(void)
@@ -604,7 +592,7 @@ salinfo_init(void)
struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */
struct proc_dir_entry *dir, *entry;
struct salinfo_data *data;
- int i, j;
+ int i;
salinfo_dir = proc_mkdir("sal", NULL);
if (!salinfo_dir)
@@ -617,8 +605,6 @@ salinfo_init(void)
(void *)salinfo_entries[i].feature);
}
- cpu_notifier_register_begin();
-
for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
data = salinfo_data + i;
data->type = i;
@@ -639,10 +625,6 @@ salinfo_init(void)
continue;
*sdir++ = entry;
- /* we missed any events before now */
- for_each_online_cpu(j)
- cpumask_set_cpu(j, &data->cpu_event);
-
*sdir++ = dir;
}
@@ -653,10 +635,9 @@ salinfo_init(void)
salinfo_timer.function = &salinfo_timeout;
add_timer(&salinfo_timer);
- __register_hotcpu_notifier(&salinfo_cpu_notifier);
-
- cpu_notifier_register_done();
-
+ i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/salinfo:online",
+ salinfo_cpu_online, salinfo_cpu_pre_down);
+ WARN_ON(i < 0);
return 0;
}
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 6f892b94e906..021f44ab4bfb 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -68,7 +68,7 @@ void vtime_account_user(struct task_struct *tsk)
if (ti->ac_utime) {
delta_utime = cycle_to_cputime(ti->ac_utime);
- account_user_time(tsk, delta_utime, delta_utime);
+ account_user_time(tsk, delta_utime);
ti->ac_utime = 0;
}
}
@@ -112,7 +112,7 @@ void vtime_account_system(struct task_struct *tsk)
{
cputime_t delta = vtime_delta(tsk);
- account_system_time(tsk, 0, delta, delta);
+ account_system_time(tsk, 0, delta);
}
EXPORT_SYMBOL_GPL(vtime_account_system);
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index c01fe8991244..1a68f012a6dc 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -349,9 +349,9 @@ static int cpu_cache_sysfs_init(unsigned int cpu)
}
/* Add cache interface for CPU device */
-static int cache_add_dev(struct device *sys_dev)
+static int cache_add_dev(unsigned int cpu)
{
- unsigned int cpu = sys_dev->id;
+ struct device *sys_dev = get_cpu_device(cpu);
unsigned long i, j;
struct cache_info *this_object;
int retval = 0;
@@ -399,9 +399,8 @@ static int cache_add_dev(struct device *sys_dev)
}
/* Remove cache interface for CPU device */
-static int cache_remove_dev(struct device *sys_dev)
+static int cache_remove_dev(unsigned int cpu)
{
- unsigned int cpu = sys_dev->id;
unsigned long i;
for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
@@ -419,52 +418,13 @@ static int cache_remove_dev(struct device *sys_dev)
return 0;
}
-/*
- * When a cpu is hot-plugged, do a check and initiate
- * cache kobject if necessary
- */
-static int cache_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct device *sys_dev;
-
- sys_dev = get_cpu_device(cpu);
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cache_add_dev(sys_dev);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- cache_remove_dev(sys_dev);
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block cache_cpu_notifier =
-{
- .notifier_call = cache_cpu_callback
-};
-
static int __init cache_sysfs_init(void)
{
- int i;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(i) {
- struct device *sys_dev = get_cpu_device((unsigned int)i);
- cache_add_dev(sys_dev);
- }
-
- __register_hotcpu_notifier(&cache_cpu_notifier);
-
- cpu_notifier_register_done();
+ int ret;
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/topology:online",
+ cache_add_dev, cache_remove_dev);
+ WARN_ON(ret < 0);
return 0;
}
-
device_initcall(cache_sysfs_init);
-
diff --git a/arch/m32r/include/asm/mutex.h b/arch/m32r/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/m32r/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h
index 9f8fd9bef70f..5767367550c6 100644
--- a/arch/m32r/include/asm/processor.h
+++ b/arch/m32r/include/asm/processor.h
@@ -133,6 +133,5 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#endif /* _ASM_M32R_PROCESSOR_H */
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index eb85bd9c6180..1f2e5d31cb24 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += mman.h
-generic-y += mutex.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += resource.h
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index c84a2183b3f0..f5f790c31bf8 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -156,6 +156,5 @@ unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0))
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#endif
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 29acb89daaaa..167150c701d1 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -27,7 +27,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
-generic-y += mutex.h
generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index a0333ebcac35..ec6a49076980 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -152,7 +152,6 @@ unsigned long get_wchan(struct task_struct *p);
#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
extern void setup_priv(void);
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 86f65721e629..85885a501dce 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -27,6 +27,7 @@ config MICROBLAZE
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_OPROFILE
select IRQ_DOMAIN
+ select XILINX_INTC
select MODULES_USE_ELF_RELA
select OF
select OF_EARLY_FLATTREE
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index bab3b1393ad4..d785defeeed5 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -16,6 +16,6 @@ struct pt_regs;
extern void do_IRQ(struct pt_regs *regs);
/* should be defined in each interrupt controller driver */
-extern unsigned int get_irq(void);
+extern unsigned int xintc_get_irq(void);
#endif /* _ASM_MICROBLAZE_IRQ_H */
diff --git a/arch/microblaze/include/asm/mutex.h b/arch/microblaze/include/asm/mutex.h
deleted file mode 100644
index ff6101aa2c71..000000000000
--- a/arch/microblaze/include/asm/mutex.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index c38d0dd91134..37ef196e4519 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -22,7 +22,6 @@
extern const struct seq_operations cpuinfo_op;
# define cpu_relax() barrier()
-# define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(tsk) \
(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index f08bacaf8a95..e098381af928 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -15,7 +15,7 @@ endif
extra-y := head.o vmlinux.lds
obj-y += dma.o exceptions.o \
- hw_exception_handler.o intc.o irq.o \
+ hw_exception_handler.o irq.o \
platform.o process.o prom.o ptrace.o \
reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
deleted file mode 100644
index 90bec7d71f85..000000000000
--- a/arch/microblaze/kernel/intc.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2012-2013 Xilinx, Inc.
- * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/irqchip.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-
-static void __iomem *intc_baseaddr;
-
-/* No one else should require these constants, so define them locally here. */
-#define ISR 0x00 /* Interrupt Status Register */
-#define IPR 0x04 /* Interrupt Pending Register */
-#define IER 0x08 /* Interrupt Enable Register */
-#define IAR 0x0c /* Interrupt Acknowledge Register */
-#define SIE 0x10 /* Set Interrupt Enable bits */
-#define CIE 0x14 /* Clear Interrupt Enable bits */
-#define IVR 0x18 /* Interrupt Vector Register */
-#define MER 0x1c /* Master Enable Register */
-
-#define MER_ME (1<<0)
-#define MER_HIE (1<<1)
-
-static unsigned int (*read_fn)(void __iomem *);
-static void (*write_fn)(u32, void __iomem *);
-
-static void intc_write32(u32 val, void __iomem *addr)
-{
- iowrite32(val, addr);
-}
-
-static unsigned int intc_read32(void __iomem *addr)
-{
- return ioread32(addr);
-}
-
-static void intc_write32_be(u32 val, void __iomem *addr)
-{
- iowrite32be(val, addr);
-}
-
-static unsigned int intc_read32_be(void __iomem *addr)
-{
- return ioread32be(addr);
-}
-
-static void intc_enable_or_unmask(struct irq_data *d)
-{
- unsigned long mask = 1 << d->hwirq;
-
- pr_debug("enable_or_unmask: %ld\n", d->hwirq);
-
- /* ack level irqs because they can't be acked during
- * ack function since the handle_level_irq function
- * acks the irq before calling the interrupt handler
- */
- if (irqd_is_level_type(d))
- write_fn(mask, intc_baseaddr + IAR);
-
- write_fn(mask, intc_baseaddr + SIE);
-}
-
-static void intc_disable_or_mask(struct irq_data *d)
-{
- pr_debug("disable: %ld\n", d->hwirq);
- write_fn(1 << d->hwirq, intc_baseaddr + CIE);
-}
-
-static void intc_ack(struct irq_data *d)
-{
- pr_debug("ack: %ld\n", d->hwirq);
- write_fn(1 << d->hwirq, intc_baseaddr + IAR);
-}
-
-static void intc_mask_ack(struct irq_data *d)
-{
- unsigned long mask = 1 << d->hwirq;
-
- pr_debug("disable_and_ack: %ld\n", d->hwirq);
- write_fn(mask, intc_baseaddr + CIE);
- write_fn(mask, intc_baseaddr + IAR);
-}
-
-static struct irq_chip intc_dev = {
- .name = "Xilinx INTC",
- .irq_unmask = intc_enable_or_unmask,
- .irq_mask = intc_disable_or_mask,
- .irq_ack = intc_ack,
- .irq_mask_ack = intc_mask_ack,
-};
-
-static struct irq_domain *root_domain;
-
-unsigned int get_irq(void)
-{
- unsigned int hwirq, irq = -1;
-
- hwirq = read_fn(intc_baseaddr + IVR);
- if (hwirq != -1U)
- irq = irq_find_mapping(root_domain, hwirq);
-
- pr_debug("get_irq: hwirq=%d, irq=%d\n", hwirq, irq);
-
- return irq;
-}
-
-static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
-{
- u32 intr_mask = (u32)d->host_data;
-
- if (intr_mask & (1 << hw)) {
- irq_set_chip_and_handler_name(irq, &intc_dev,
- handle_edge_irq, "edge");
- irq_clear_status_flags(irq, IRQ_LEVEL);
- } else {
- irq_set_chip_and_handler_name(irq, &intc_dev,
- handle_level_irq, "level");
- irq_set_status_flags(irq, IRQ_LEVEL);
- }
- return 0;
-}
-
-static const struct irq_domain_ops xintc_irq_domain_ops = {
- .xlate = irq_domain_xlate_onetwocell,
- .map = xintc_map,
-};
-
-static int __init xilinx_intc_of_init(struct device_node *intc,
- struct device_node *parent)
-{
- u32 nr_irq, intr_mask;
- int ret;
-
- intc_baseaddr = of_iomap(intc, 0);
- BUG_ON(!intc_baseaddr);
-
- ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
- if (ret < 0) {
- pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__);
- return ret;
- }
-
- ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask);
- if (ret < 0) {
- pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__);
- return ret;
- }
-
- if (intr_mask >> nr_irq)
- pr_warn("%s: mismatch in kind-of-intr param\n", __func__);
-
- pr_info("%s: num_irq=%d, edge=0x%x\n",
- intc->full_name, nr_irq, intr_mask);
-
- write_fn = intc_write32;
- read_fn = intc_read32;
-
- /*
- * Disable all external interrupts until they are
- * explicity requested.
- */
- write_fn(0, intc_baseaddr + IER);
-
- /* Acknowledge any pending interrupts just in case. */
- write_fn(0xffffffff, intc_baseaddr + IAR);
-
- /* Turn on the Master Enable. */
- write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
- if (!(read_fn(intc_baseaddr + MER) & (MER_HIE | MER_ME))) {
- write_fn = intc_write32_be;
- read_fn = intc_read32_be;
- write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
- }
-
- /* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
- * lazy and Michal can clean it up to something nicer when he tests
- * and commits this patch. ~~gcl */
- root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
- (void *)intr_mask);
-
- irq_set_default_host(root_domain);
-
- return 0;
-}
-
-IRQCHIP_DECLARE(xilinx_intc, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index 11e24de91aa4..903dad822fad 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -29,12 +29,12 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
trace_hardirqs_off();
irq_enter();
- irq = get_irq();
+ irq = xintc_get_irq();
next_irq:
BUG_ON(!irq);
generic_handle_irq(irq);
- irq = get_irq();
+ irq = xintc_get_irq();
if (irq != -1U) {
pr_debug("next irq: %d\n", irq);
++concurrent_irq;
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 9740066cc631..3269b742a75e 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -9,7 +9,6 @@ generic-y += irq_work.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mutex.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 0d36c87acbe2..95b8c471f572 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -389,7 +389,6 @@ unsigned long get_wchan(struct task_struct *p);
#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* Return_address is a replacement for __builtin_return_address(count)
diff --git a/arch/mn10300/include/asm/mutex.h b/arch/mn10300/include/asm/mutex.h
deleted file mode 100644
index 84f5490c6fb4..000000000000
--- a/arch/mn10300/include/asm/mutex.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* MN10300 Mutex fastpath
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- *
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-#include <asm-generic/mutex-null.h>
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h
index b10ba121c849..18e17abf7664 100644
--- a/arch/mn10300/include/asm/processor.h
+++ b/arch/mn10300/include/asm/processor.h
@@ -69,7 +69,6 @@ extern void print_cpu_info(struct mn10300_cpuinfo *);
extern void dodgy_tsc(void);
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* User space process size: 1.75GB (default).
diff --git a/arch/nios2/include/asm/mutex.h b/arch/nios2/include/asm/mutex.h
deleted file mode 100644
index ff6101aa2c71..000000000000
--- a/arch/nios2/include/asm/mutex.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/nios2/include/asm/processor.h b/arch/nios2/include/asm/processor.h
index 1c953f0cadbf..3bbbc3d798e5 100644
--- a/arch/nios2/include/asm/processor.h
+++ b/arch/nios2/include/asm/processor.h
@@ -88,7 +88,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.kregs->sp)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#endif /* __ASSEMBLY__ */
diff --git a/arch/openrisc/include/asm/mutex.h b/arch/openrisc/include/asm/mutex.h
deleted file mode 100644
index b85a0cfa9fc9..000000000000
--- a/arch/openrisc/include/asm/mutex.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index 70334c9f7d24..a908e6c30a00 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -92,7 +92,6 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
#define init_stack (init_thread_union.stack)
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#endif /* __ASSEMBLY__ */
#endif /* __ASM_OPENRISC_PROCESSOR_H */
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index f9b3a81aefcd..91f53c07f410 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mutex.h
generic-y += param.h
generic-y += percpu.h
generic-y += poll.h
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 2e674e13e005..ca40741378be 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -309,7 +309,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* parisc_requires_coherency() is used to identify the combined VIPT/PIPT
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 48001e754ff2..3da87e198878 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -161,6 +161,7 @@ config PPC
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select GENERIC_CPU_AUTOPROBE
select HAVE_VIRT_CPU_ACCOUNTING
+ select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_KERNEL_GZIP
select HAVE_CC_STACKPROTECTOR
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 4f60db074725..aa2e6a34b872 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -46,26 +46,12 @@ extern cputime_t cputime_one_jiffy;
* Convert cputime <-> jiffies
*/
extern u64 __cputime_jiffies_factor;
-DECLARE_PER_CPU(unsigned long, cputime_last_delta);
-DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);
static inline unsigned long cputime_to_jiffies(const cputime_t ct)
{
return mulhdu((__force u64) ct, __cputime_jiffies_factor);
}
-/* Estimate the scaled cputime by scaling the real cputime based on
- * the last scaled to real ratio */
-static inline cputime_t cputime_to_scaled(const cputime_t ct)
-{
- if (cpu_has_feature(CPU_FTR_SPURR) &&
- __this_cpu_read(cputime_last_delta))
- return (__force u64) ct *
- __this_cpu_read(cputime_scaled_last_delta) /
- __this_cpu_read(cputime_last_delta);
- return ct;
-}
-
static inline cputime_t jiffies_to_cputime(const unsigned long jif)
{
u64 ct;
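
The surviving conversion helper above leans on mulhdu(), which returns the high 64 bits of a 64x64-bit multiply, so __cputime_jiffies_factor must be pre-scaled such that the wanted quotient lands in the upper half of the product. A sketch of the same arithmetic in plain C; the 2^64 * HZ / timebase-frequency scaling of the factor is an assumption about how the factor is built, not something stated in this hunk:

  /* sketch: timebase ticks -> jiffies via a widening multiply */
  static inline unsigned long ticks_to_jiffies(u64 ticks, u64 factor)
  {
          /* factor ~= (2^64 * HZ) / timebase_frequency (assumed scaling) */
          return (unsigned long)(((unsigned __int128)ticks * factor) >> 64);
  }
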
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
deleted file mode 100644
index 078155fa1189..000000000000
--- a/arch/powerpc/include/asm/mutex.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
- */
-#ifndef _ASM_POWERPC_MUTEX_H
-#define _ASM_POWERPC_MUTEX_H
-
-static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
-{
- int t;
-
- __asm__ __volatile__ (
-"1: lwarx %0,0,%1 # mutex trylock\n\
- cmpw 0,%0,%2\n\
- bne- 2f\n"
- PPC405_ERR77(0,%1)
-" stwcx. %3,0,%1\n\
- bne- 1b"
- PPC_ACQUIRE_BARRIER
- "\n\
-2:"
- : "=&r" (t)
- : "r" (&v->counter), "r" (old), "r" (new)
- : "cc", "memory");
-
- return t;
-}
-
-static inline int __mutex_dec_return_lock(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%1 # mutex lock\n\
- addic %0,%0,-1\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1\n\
- bne- 1b"
- PPC_ACQUIRE_BARRIER
- : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-static inline int __mutex_inc_return_unlock(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- PPC_RELEASE_BARRIER
-"1: lwarx %0,0,%1 # mutex unlock\n\
- addic %0,%0,1\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1 \n\
- bne- 1b"
- : "=&r" (t)
- : "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function MUST leave the value lower than
- * 1 even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(__mutex_dec_return_lock(count) < 0))
- fail_fn(count);
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(__mutex_dec_return_lock(count) < 0))
- return -1;
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the count from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than 1.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(__mutex_inc_return_unlock(count) <= 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to 0, and return 1 (success), or if the count
- * was not 1, then return 0 (failure).
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- if (likely(atomic_read(count) == 1 && __mutex_cmpxchg_lock(count, 1, 0) == 1))
- return 1;
- return 0;
-}
-
-#endif
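
The per-architecture mutex fastpath headers (this optimised PowerPC one, and the generic mutex-dec/mutex-xchg wrappers elsewhere in this diff) are removed tree-wide, presumably because the rewritten generic mutex no longer calls the __mutex_fastpath_*() hooks: its fast path is a single compare-and-exchange on an owner word. Roughly, as a sketch of that idea rather than the exact upstream code:

  /* sketch: lock is free when owner == 0; take it by cmpxchg'ing in current
   * (assumes an atomic_long_t owner field, as in the reworked mutex) */
  static inline bool mutex_trylock_fast_sketch(struct mutex *lock)
  {
          unsigned long curr = (unsigned long)current;

          return atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr) == 0UL;
  }
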
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 5c0a665af1c3..1ba814436c73 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -402,8 +402,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#define cpu_relax() barrier()
#endif
-#define cpu_relax_lowlatency() cpu_relax()
-
/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index fa37fe93bc02..8c1b913de6d7 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -52,6 +52,14 @@
#define SYNC_IO
#endif
+#ifdef CONFIG_PPC_PSERIES
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
+}
+#endif
+
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.slock == 0;
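
vcpu_is_preempted() is the new paravirt hook this branch introduces for lock spinners: on pseries it reports the vCPU as preempted while the low bit of the hypervisor-maintained yield_count in the shared lppaca is set, i.e. while the virtual CPU is not currently dispatched. The intended consumer is optimistic-spinning code, which can stop wasting cycles on a lock whose owner cannot run; a sketch modelled on how such owner-spinning loops typically use the hook (the helper name is made up):

  /* sketch: spin only while the owner is on a CPU and that vCPU is running */
  static bool spin_on_owner_sketch(struct task_struct *owner)
  {
          while (READ_ONCE(owner->on_cpu)) {
                  if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
                          return false;   /* blocking beats spinning here */
                  cpu_relax();
          }
          return true;
  }
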
diff --git a/arch/powerpc/include/asm/xilinx_intc.h b/arch/powerpc/include/asm/xilinx_intc.h
index 343612f8fece..3192d7f0a05b 100644
--- a/arch/powerpc/include/asm/xilinx_intc.h
+++ b/arch/powerpc/include/asm/xilinx_intc.h
@@ -14,7 +14,7 @@
#ifdef __KERNEL__
extern void __init xilinx_intc_init_tree(void);
-extern unsigned int xilinx_intc_get_irq(void);
+extern unsigned int xintc_get_irq(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_XILINX_INTC_H */
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index c4f1d1f7bae0..c1fb255a60d6 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -703,7 +703,7 @@ static struct device_attribute pa6t_attrs[] = {
#endif /* HAS_PPC_PMC_PA6T */
#endif /* HAS_PPC_PMC_CLASSIC */
-static void register_cpu_online(unsigned int cpu)
+static int register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
@@ -782,11 +782,12 @@ static void register_cpu_online(unsigned int cpu)
}
#endif
cacheinfo_cpu_online(cpu);
+ return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-static void unregister_cpu_online(unsigned int cpu)
+static int unregister_cpu_online(unsigned int cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
struct device_attribute *attrs, *pmc_attrs;
@@ -863,6 +864,8 @@ static void unregister_cpu_online(unsigned int cpu)
}
#endif
cacheinfo_cpu_offline(cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+ return 0;
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
@@ -883,32 +886,6 @@ ssize_t arch_cpu_release(const char *buf, size_t count)
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
-#endif /* CONFIG_HOTPLUG_CPU */
-
-static int sysfs_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned int)(long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- register_cpu_online(cpu);
- break;
-#ifdef CONFIG_HOTPLUG_CPU
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- unregister_cpu_online(cpu);
- break;
-#endif
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block sysfs_cpu_nb = {
- .notifier_call = sysfs_cpu_notify,
-};
-
static DEFINE_MUTEX(cpu_mutex);
int cpu_add_dev_attr(struct device_attribute *attr)
@@ -1023,12 +1000,10 @@ static DEVICE_ATTR(physical_id, 0444, show_physical_id, NULL);
static int __init topology_init(void)
{
- int cpu;
+ int cpu, r;
register_nodes();
- cpu_notifier_register_begin();
-
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -1047,15 +1022,10 @@ static int __init topology_init(void)
device_create_file(&c->dev, &dev_attr_physical_id);
}
-
- if (cpu_online(cpu))
- register_cpu_online(cpu);
}
-
- __register_cpu_notifier(&sysfs_cpu_nb);
-
- cpu_notifier_register_done();
-
+ r = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/topology:online",
+ register_cpu_online, unregister_cpu_online);
+ WARN_ON(r < 0);
#ifdef CONFIG_PPC64
sysfs_create_dscr_default();
#endif /* CONFIG_PPC64 */
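
This is the standard conversion running through this merge: the open-coded CPU notifier is replaced by a dynamically allocated hotplug state. cpuhp_setup_state() also invokes the online callback for every CPU that is already up, which is why the explicit register_cpu_online() calls in the loop above can go. A minimal sketch of the API as used here (names are placeholders):

  static int foo_online(unsigned int cpu)
  {
          /* create the per-CPU sysfs attributes, counters, ... */
          return 0;
  }

  static int foo_offline(unsigned int cpu)
  {
          /* undo foo_online() */
          return 0;
  }

  /* for CPUHP_AP_ONLINE_DYN, a return value >= 0 is the allocated state */
  ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
                          foo_online, foo_offline);
  WARN_ON(ret < 0);
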
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index bc3f7d0d7b79..be9751f1cb2a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -164,8 +164,6 @@ u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
-DEFINE_PER_CPU(unsigned long, cputime_last_delta);
-DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
cputime_t cputime_one_jiffy;
@@ -360,7 +358,8 @@ void vtime_account_system(struct task_struct *tsk)
unsigned long delta, sys_scaled, stolen;
delta = vtime_delta(tsk, &sys_scaled, &stolen);
- account_system_time(tsk, 0, delta, sys_scaled);
+ account_system_time(tsk, 0, delta);
+ tsk->stimescaled += sys_scaled;
if (stolen)
account_steal_time(stolen);
}
@@ -393,7 +392,8 @@ void vtime_account_user(struct task_struct *tsk)
acct->user_time = 0;
acct->user_time_scaled = 0;
acct->utime_sspurr = 0;
- account_user_time(tsk, utime, utimescaled);
+ account_user_time(tsk, utime);
+ tsk->utimescaled += utimescaled;
}
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3686471be32b..39ef1f4a7b02 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2254,12 +2254,12 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
* enter the guest. Only do this if it is the primary thread of the
* core (not if a subcore) that is entering the guest.
*/
-static inline void kvmppc_clear_host_core(int cpu)
+static inline int kvmppc_clear_host_core(unsigned int cpu)
{
int core;
if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
- return;
+ return 0;
/*
* Memory barrier can be omitted here as we will do a smp_wmb()
* later in kvmppc_start_thread and we need ensure that state is
@@ -2267,6 +2267,7 @@ static inline void kvmppc_clear_host_core(int cpu)
*/
core = cpu >> threads_shift;
kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
+ return 0;
}
/*
@@ -2274,12 +2275,12 @@ static inline void kvmppc_clear_host_core(int cpu)
* Only need to do this if it is the primary thread of the core that is
* exiting.
*/
-static inline void kvmppc_set_host_core(int cpu)
+static inline int kvmppc_set_host_core(unsigned int cpu)
{
int core;
if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
- return;
+ return 0;
/*
* Memory barrier can be omitted here because we do a spin_unlock
@@ -2287,6 +2288,7 @@ static inline void kvmppc_set_host_core(int cpu)
*/
core = cpu >> threads_shift;
kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
+ return 0;
}
/*
@@ -3094,36 +3096,6 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
}
#ifdef CONFIG_KVM_XICS
-static int kvmppc_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
-{
- unsigned long cpu = (long)hcpu;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- kvmppc_set_host_core(cpu);
- break;
-
-#ifdef CONFIG_HOTPLUG_CPU
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- kvmppc_clear_host_core(cpu);
- break;
-#endif
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block kvmppc_cpu_notifier = {
- .notifier_call = kvmppc_cpu_notify,
-};
-
/*
* Allocate a per-core structure for managing state about which cores are
* running in the host versus the guest and for exchanging data between
@@ -3185,15 +3157,17 @@ void kvmppc_alloc_host_rm_ops(void)
return;
}
- register_cpu_notifier(&kvmppc_cpu_notifier);
-
+ cpuhp_setup_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE,
+ "ppc/kvm_book3s:prepare",
+ kvmppc_set_host_core,
+ kvmppc_clear_host_core);
put_online_cpus();
}
void kvmppc_free_host_rm_ops(void)
{
if (kvmppc_host_rm_ops_hv) {
- unregister_cpu_notifier(&kvmppc_cpu_notifier);
+ cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE);
kfree(kvmppc_host_rm_ops_hv->rm_core);
kfree(kvmppc_host_rm_ops_hv);
kvmppc_host_rm_ops_hv = NULL;
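
Unlike the plain cpuhp_setup_state() conversions elsewhere in this diff, the _nocalls variant used here installs the callbacks for the new, statically numbered CPUHP_KVM_PPC_BOOK3S_PREPARE state without running them for CPUs that are already online — presumably because kvmppc_alloc_host_rm_ops() (outside this hunk) already initialises the in_host state for current CPUs inside the get/put_online_cpus() bracket. Purely as an illustration, the same call with its (here ignored) return value checked:

  ret = cpuhp_setup_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE,
                                  "ppc/kvm_book3s:prepare",
                                  kvmppc_set_host_core,
                                  kvmppc_clear_host_core);
  WARN_ON(ret < 0);     /* illustrative; the patch itself does not check it */
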
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index f8d1410aa5bb..abc24501c4c0 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -64,6 +64,7 @@ config XILINX_VIRTEX_GENERIC_BOARD
default n
select XILINX_VIRTEX_II_PRO
select XILINX_VIRTEX_4_FX
+ select XILINX_INTC
help
This option enables generic support for Xilinx Virtex based boards.
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
index 91a08ea758a8..e3d5e095846b 100644
--- a/arch/powerpc/platforms/40x/virtex.c
+++ b/arch/powerpc/platforms/40x/virtex.c
@@ -48,7 +48,7 @@ define_machine(virtex) {
.probe = virtex_probe,
.setup_arch = xilinx_pci_init,
.init_IRQ = xilinx_intc_init_tree,
- .get_irq = xilinx_intc_get_irq,
+ .get_irq = xintc_get_irq,
.restart = ppc4xx_reset_system,
.calibrate_decr = generic_calibrate_decr,
};
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 8d18669856f9..9b0afe935cc1 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -241,6 +241,7 @@ config XILINX_VIRTEX440_GENERIC_BOARD
depends on 44x
default n
select XILINX_VIRTEX_5_FXT
+ select XILINX_INTC
help
This option enables generic support for Xilinx Virtex based boards
that use a 440 based processor in the Virtex 5 FXT FPGA architecture.
diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c
index a7e08026097a..3eb13ed926ee 100644
--- a/arch/powerpc/platforms/44x/virtex.c
+++ b/arch/powerpc/platforms/44x/virtex.c
@@ -54,7 +54,7 @@ define_machine(virtex) {
.probe = virtex_probe,
.setup_arch = xilinx_pci_init,
.init_IRQ = xilinx_intc_init_tree,
- .get_irq = xilinx_intc_get_irq,
+ .get_irq = xintc_get_irq,
.calibrate_decr = generic_calibrate_decr,
.restart = ppc4xx_reset_system,
};
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 0f52d7955796..4a86dcff3fcd 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -29,194 +29,7 @@
#include <asm/processor.h>
#include <asm/i8259.h>
#include <asm/irq.h>
-
-/*
- * INTC Registers
- */
-#define XINTC_ISR 0 /* Interrupt Status */
-#define XINTC_IPR 4 /* Interrupt Pending */
-#define XINTC_IER 8 /* Interrupt Enable */
-#define XINTC_IAR 12 /* Interrupt Acknowledge */
-#define XINTC_SIE 16 /* Set Interrupt Enable bits */
-#define XINTC_CIE 20 /* Clear Interrupt Enable bits */
-#define XINTC_IVR 24 /* Interrupt Vector */
-#define XINTC_MER 28 /* Master Enable */
-
-static struct irq_domain *master_irqhost;
-
-#define XILINX_INTC_MAXIRQS (32)
-
-/* The following table allows the interrupt type, edge or level,
- * to be cached after being read from the device tree until the interrupt
- * is mapped
- */
-static int xilinx_intc_typetable[XILINX_INTC_MAXIRQS];
-
-/* Map the interrupt type from the device tree to the interrupt types
- * used by the interrupt subsystem
- */
-static unsigned char xilinx_intc_map_senses[] = {
- IRQ_TYPE_EDGE_RISING,
- IRQ_TYPE_EDGE_FALLING,
- IRQ_TYPE_LEVEL_HIGH,
- IRQ_TYPE_LEVEL_LOW,
-};
-
-/*
- * The interrupt controller is setup such that it doesn't work well with
- * the level interrupt handler in the kernel because the handler acks the
- * interrupt before calling the application interrupt handler. To deal with
- * that, we use 2 different irq chips so that different functions can be
- * used for level and edge type interrupts.
- *
- * IRQ Chip common (across level and edge) operations
- */
-static void xilinx_intc_mask(struct irq_data *d)
-{
- int irq = irqd_to_hwirq(d);
- void * regs = irq_data_get_irq_chip_data(d);
- pr_debug("mask: %d\n", irq);
- out_be32(regs + XINTC_CIE, 1 << irq);
-}
-
-static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type)
-{
- return 0;
-}
-
-/*
- * IRQ Chip level operations
- */
-static void xilinx_intc_level_unmask(struct irq_data *d)
-{
- int irq = irqd_to_hwirq(d);
- void * regs = irq_data_get_irq_chip_data(d);
- pr_debug("unmask: %d\n", irq);
- out_be32(regs + XINTC_SIE, 1 << irq);
-
- /* ack level irqs because they can't be acked during
- * ack function since the handle_level_irq function
- * acks the irq before calling the inerrupt handler
- */
- out_be32(regs + XINTC_IAR, 1 << irq);
-}
-
-static struct irq_chip xilinx_intc_level_irqchip = {
- .name = "Xilinx Level INTC",
- .irq_mask = xilinx_intc_mask,
- .irq_mask_ack = xilinx_intc_mask,
- .irq_unmask = xilinx_intc_level_unmask,
- .irq_set_type = xilinx_intc_set_type,
-};
-
-/*
- * IRQ Chip edge operations
- */
-static void xilinx_intc_edge_unmask(struct irq_data *d)
-{
- int irq = irqd_to_hwirq(d);
- void *regs = irq_data_get_irq_chip_data(d);
- pr_debug("unmask: %d\n", irq);
- out_be32(regs + XINTC_SIE, 1 << irq);
-}
-
-static void xilinx_intc_edge_ack(struct irq_data *d)
-{
- int irq = irqd_to_hwirq(d);
- void * regs = irq_data_get_irq_chip_data(d);
- pr_debug("ack: %d\n", irq);
- out_be32(regs + XINTC_IAR, 1 << irq);
-}
-
-static struct irq_chip xilinx_intc_edge_irqchip = {
- .name = "Xilinx Edge INTC",
- .irq_mask = xilinx_intc_mask,
- .irq_unmask = xilinx_intc_edge_unmask,
- .irq_ack = xilinx_intc_edge_ack,
- .irq_set_type = xilinx_intc_set_type,
-};
-
-/*
- * IRQ Host operations
- */
-
-/**
- * xilinx_intc_xlate - translate virq# from device tree interrupts property
- */
-static int xilinx_intc_xlate(struct irq_domain *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq,
- unsigned int *out_flags)
-{
- if ((intsize < 2) || (intspec[0] >= XILINX_INTC_MAXIRQS))
- return -EINVAL;
-
- /* keep a copy of the interrupt type til the interrupt is mapped
- */
- xilinx_intc_typetable[intspec[0]] = xilinx_intc_map_senses[intspec[1]];
-
- /* Xilinx uses 2 interrupt entries, the 1st being the h/w
- * interrupt number, the 2nd being the interrupt type, edge or level
- */
- *out_hwirq = intspec[0];
- *out_flags = xilinx_intc_map_senses[intspec[1]];
-
- return 0;
-}
-static int xilinx_intc_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t irq)
-{
- irq_set_chip_data(virq, h->host_data);
-
- if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH ||
- xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) {
- irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip,
- handle_level_irq);
- } else {
- irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip,
- handle_edge_irq);
- }
- return 0;
-}
-
-static const struct irq_domain_ops xilinx_intc_ops = {
- .map = xilinx_intc_map,
- .xlate = xilinx_intc_xlate,
-};
-
-struct irq_domain * __init
-xilinx_intc_init(struct device_node *np)
-{
- struct irq_domain * irq;
- void * regs;
-
- /* Find and map the intc registers */
- regs = of_iomap(np, 0);
- if (!regs) {
- pr_err("xilinx_intc: could not map registers\n");
- return NULL;
- }
-
- /* Setup interrupt controller */
- out_be32(regs + XINTC_IER, 0); /* disable all irqs */
- out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */
- out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */
-
- /* Allocate and initialize an irq_domain structure. */
- irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops,
- regs);
- if (!irq)
- panic(__FILE__ ": Cannot allocate IRQ host\n");
-
- return irq;
-}
-
-int xilinx_intc_get_irq(void)
-{
- void * regs = master_irqhost->host_data;
- pr_debug("get_irq:\n");
- return irq_linear_revmap(master_irqhost, in_be32(regs + XINTC_IVR));
-}
+#include <linux/irqchip.h>
#if defined(CONFIG_PPC_I8259)
/*
@@ -265,31 +78,11 @@ static void __init xilinx_i8259_setup_cascade(void)
static inline void xilinx_i8259_setup_cascade(void) { return; }
#endif /* defined(CONFIG_PPC_I8259) */
-static const struct of_device_id xilinx_intc_match[] __initconst = {
- { .compatible = "xlnx,opb-intc-1.00.c", },
- { .compatible = "xlnx,xps-intc-1.00.a", },
- {}
-};
-
/*
* Initialize master Xilinx interrupt controller
*/
void __init xilinx_intc_init_tree(void)
{
- struct device_node *np;
-
- /* find top level interrupt controller */
- for_each_matching_node(np, xilinx_intc_match) {
- if (!of_get_property(np, "interrupts", NULL))
- break;
- }
- BUG_ON(!np);
-
- master_irqhost = xilinx_intc_init(np);
- BUG_ON(!master_irqhost);
-
- irq_set_default_host(master_irqhost);
- of_node_put(np);
-
+ irqchip_init();
xilinx_i8259_setup_cascade();
}
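
The PowerPC-private driver for the Xilinx interrupt controller is deleted wholesale; init_IRQ now just calls irqchip_init(), and the controller is presumably probed by the common Xilinx intc driver under drivers/irqchip/ that the new XILINX_INTC Kconfig selections (and the xintc_get_irq() rename) point at. For orientation, OF-probed irqchip drivers usually register themselves like this; the compatible strings below come from the match table removed above, while the init function name is made up:

  /* sketch: bind device-tree compatibles to an init routine */
  static int __init xilinx_intc_of_init(struct device_node *np,
                                        struct device_node *parent)
  {
          /* map registers, create the irq_domain, set the default host */
          return 0;
  }
  IRQCHIP_DECLARE(xps_intc, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
  IRQCHIP_DECLARE(opb_intc, "xlnx,opb-intc-1.00.c", xilinx_intc_of_init);
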
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2d05efa31df3..c6722112527d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -173,6 +173,7 @@ config S390
select THREAD_INFO_IN_TASK
select TTY
select VIRT_CPU_ACCOUNTING
+ select ARCH_HAS_SCALED_CPUTIME
select VIRT_TO_BUS
select HAVE_NMI
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/s390/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9c00351f1545..6bca916a5ba0 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -240,9 +240,10 @@ static inline unsigned short stap(void)
/*
* Give up the time slice of the virtual PU.
*/
-void cpu_relax(void);
+#define cpu_relax_yield cpu_relax_yield
+void cpu_relax_yield(void);
-#define cpu_relax_lowlatency() barrier()
+#define cpu_relax() barrier()
#define ECAG_CACHE_ATTRIBUTE 0
#define ECAG_CPU_ATTRIBUTE 1
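
On s390, cpu_relax() used to give the time slice back to the hypervisor via diagnose 0x44; that expensive behaviour is now split out as cpu_relax_yield(), and cpu_relax() becomes a plain barrier as on other architectures. Callers that genuinely want to yield are expected to use the new helper, with other architectures presumably falling back to cpu_relax() through the usual override idiom (the fallback below is my assumption about the generic header, not part of this hunk):

  /* sketch of the usual arch-override idiom for such helpers */
  #ifndef cpu_relax_yield
  #define cpu_relax_yield() cpu_relax()
  #endif

  /* e.g. a long wait that is happy to hand the CPU back to the host */
  static void wait_for_peers(atomic_t *pending)
  {
          while (atomic_read(pending) > 0)
                  cpu_relax_yield();
  }
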
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 7e9e09f600fa..7ecd8902a5c3 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -23,6 +23,14 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
return __sync_bool_compare_and_swap(lock, old, new);
}
+#ifndef CONFIG_SMP
+static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
+#else
+bool arch_vcpu_is_preempted(int cpu);
+#endif
+
+#define vcpu_is_preempted arch_vcpu_is_preempted
+
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 81d0808085e6..9e60ef144d03 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -53,7 +53,7 @@ void s390_update_cpu_mhz(void)
on_each_cpu(update_cpu_mhz, NULL, 0);
}
-void notrace cpu_relax(void)
+void notrace cpu_relax_yield(void)
{
if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
diag_stat_inc(DIAG_STAT_X044);
@@ -61,7 +61,7 @@ void notrace cpu_relax(void)
}
barrier();
}
-EXPORT_SYMBOL(cpu_relax);
+EXPORT_SYMBOL(cpu_relax_yield);
/*
* cpu_init - initializes state that is per-CPU.
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5d53ab646b3f..e49f61aadaf9 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -367,10 +367,15 @@ int smp_find_processor_id(u16 address)
return -1;
}
-int smp_vcpu_scheduled(int cpu)
+bool arch_vcpu_is_preempted(int cpu)
{
- return pcpu_running(pcpu_devices + cpu);
+ if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
+ return false;
+ if (pcpu_running(pcpu_devices + cpu))
+ return false;
+ return true;
}
+EXPORT_SYMBOL(arch_vcpu_is_preempted);
void smp_yield_cpu(int cpu)
{
@@ -1040,22 +1045,18 @@ static struct attribute_group cpu_online_attr_group = {
.attrs = cpu_online_attrs,
};
-static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
+static int smp_cpu_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned int)(long)hcpu;
struct device *s = &per_cpu(cpu_device, cpu)->dev;
- int err = 0;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
- break;
- case CPU_DEAD:
- sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
- break;
- }
- return notifier_from_errno(err);
+ return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+}
+static int smp_cpu_pre_down(unsigned int cpu)
+{
+ struct device *s = &per_cpu(cpu_device, cpu)->dev;
+
+ sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
+ return 0;
}
static int smp_add_present_cpu(int cpu)
@@ -1076,20 +1077,12 @@ static int smp_add_present_cpu(int cpu)
rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
if (rc)
goto out_cpu;
- if (cpu_online(cpu)) {
- rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
- if (rc)
- goto out_online;
- }
rc = topology_cpu_init(c);
if (rc)
goto out_topology;
return 0;
out_topology:
- if (cpu_online(cpu))
- sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
-out_online:
sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
@@ -1143,17 +1136,15 @@ static int __init s390_smp_init(void)
if (rc)
return rc;
#endif
- cpu_notifier_register_begin();
for_each_present_cpu(cpu) {
rc = smp_add_present_cpu(cpu);
if (rc)
goto out;
}
- __hotcpu_notifier(smp_cpu_notify, 0);
-
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
+ smp_cpu_online, smp_cpu_pre_down);
out:
- cpu_notifier_register_done();
return rc;
}
subsys_initcall(s390_smp_init);
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 9a6c95761ad2..6b246aadf311 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -136,8 +136,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
user_scaled = (user_scaled * mult) / div;
system_scaled = (system_scaled * mult) / div;
}
- account_user_time(tsk, user, user_scaled);
- account_system_time(tsk, hardirq_offset, system, system_scaled);
+ account_user_time(tsk, user);
+ tsk->utimescaled += user_scaled;
+ account_system_time(tsk, hardirq_offset, system);
+ tsk->stimescaled += system_scaled;
steal = S390_lowcore.steal_timer;
if ((s64) steal > 0) {
@@ -196,7 +198,8 @@ void vtime_account_irq_enter(struct task_struct *tsk)
system_scaled = (system_scaled * mult) / div;
}
- account_system_time(tsk, 0, system, system_scaled);
+ account_system_time(tsk, 0, system);
+ tsk->stimescaled += system_scaled;
virt_timer_forward(system);
}
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index e5f50a7d2f4e..e48a48ec24bc 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -37,15 +37,6 @@ static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
-static inline int cpu_is_preempted(int cpu)
-{
- if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
- return 0;
- if (smp_vcpu_scheduled(cpu))
- return 0;
- return 1;
-}
-
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -62,7 +53,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
continue;
}
/* First iteration: check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -81,7 +72,7 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -108,7 +99,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
continue;
}
/* Check if the lock owner is running. */
- if (first_diag && cpu_is_preempted(~owner)) {
+ if (first_diag && arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
continue;
@@ -127,7 +118,7 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
* yield the CPU unconditionally. For LPAR rely on the
* sense running status.
*/
- if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
+ if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
smp_yield_cpu(~owner);
first_diag = 0;
}
@@ -165,7 +156,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -211,7 +202,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -241,7 +232,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
owner = 0;
while (1) {
if (count-- <= 0) {
- if (owner && cpu_is_preempted(~owner))
+ if (owner && arch_vcpu_is_preempted(~owner))
smp_yield_cpu(~owner);
count = spin_retry;
}
@@ -285,7 +276,7 @@ void arch_lock_relax(unsigned int cpu)
{
if (!cpu)
return;
- if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
+ if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
return;
smp_yield_cpu(~cpu);
}
diff --git a/arch/score/include/asm/mutex.h b/arch/score/include/asm/mutex.h
deleted file mode 100644
index 10d48fe4db97..000000000000
--- a/arch/score/include/asm/mutex.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SCORE_MUTEX_H
-#define _ASM_SCORE_MUTEX_H
-
-#include <asm-generic/mutex-dec.h>
-
-#endif /* _ASM_SCORE_MUTEX_H */
diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h
index 851f441991d2..d9a922d8711b 100644
--- a/arch/score/include/asm/processor.h
+++ b/arch/score/include/asm/processor.h
@@ -24,7 +24,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define release_thread(thread) do {} while (0)
/*
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
deleted file mode 100644
index dad29b687bd3..000000000000
--- a/arch/sh/include/asm/mutex-llsc.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * arch/sh/include/asm/mutex-llsc.h
- *
- * SH-4A optimized mutex locking primitives
- *
- * Please look into asm-generic/mutex-xchg.h for a formal definition.
- */
-#ifndef __ASM_SH_MUTEX_LLSC_H
-#define __ASM_SH_MUTEX_LLSC_H
-
-/*
- * Attempting to lock a mutex on SH4A is done like in ARMv6+ architecure.
- * with a bastardized atomic decrement (it is not a reliable atomic decrement
- * but it satisfies the defined semantics for our purpose, while being
- * smaller and faster than a real atomic decrement or atomic swap.
- * The idea is to attempt decrementing the lock value only once. If once
- * decremented it isn't zero, or if its store-back fails due to a dispute
- * on the exclusive store, we simply bail out immediately through the slow
- * path where the lock will be reattempted until it succeeds.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __done, __res;
-
- __asm__ __volatile__ (
- "movli.l @%2, %0 \n"
- "add #-1, %0 \n"
- "movco.l %0, @%2 \n"
- "movt %1 \n"
- : "=&z" (__res), "=&r" (__done)
- : "r" (&(count)->counter)
- : "t");
-
- if (unlikely(!__done || __res != 0))
- fail_fn(count);
-}
-
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- int __done, __res;
-
- __asm__ __volatile__ (
- "movli.l @%2, %0 \n"
- "add #-1, %0 \n"
- "movco.l %0, @%2 \n"
- "movt %1 \n"
- : "=&z" (__res), "=&r" (__done)
- : "r" (&(count)->counter)
- : "t");
-
- if (unlikely(!__done || __res != 0))
- __res = -1;
-
- return __res;
-}
-
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- int __done, __res;
-
- __asm__ __volatile__ (
- "movli.l @%2, %0 \n\t"
- "add #1, %0 \n\t"
- "movco.l %0, @%2 \n\t"
- "movt %1 \n\t"
- : "=&z" (__res), "=&r" (__done)
- : "r" (&(count)->counter)
- : "t");
-
- if (unlikely(!__done || __res <= 0))
- fail_fn(count);
-}
-
-/*
- * If the unlock was done on a contended lock, or if the unlock simply fails
- * then the mutex remains locked.
- */
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/*
- * For __mutex_fastpath_trylock we do an atomic decrement and check the
- * result and put it in the __res variable.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int __res, __orig;
-
- __asm__ __volatile__ (
- "1: movli.l @%2, %0 \n\t"
- "dt %0 \n\t"
- "movco.l %0,@%2 \n\t"
- "bf 1b \n\t"
- "cmp/eq #0,%0 \n\t"
- "bt 2f \n\t"
- "mov #0, %1 \n\t"
- "bf 3f \n\t"
- "2: mov #1, %1 \n\t"
- "3: "
- : "=&z" (__orig), "=&r" (__res)
- : "r" (&count->counter)
- : "t");
-
- return __res;
-}
-#endif /* __ASM_SH_MUTEX_LLSC_H */
diff --git a/arch/sh/include/asm/mutex.h b/arch/sh/include/asm/mutex.h
deleted file mode 100644
index d8e37716a4a0..000000000000
--- a/arch/sh/include/asm/mutex.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-#if defined(CONFIG_CPU_SH4A)
-#include <asm/mutex-llsc.h>
-#else
-#include <asm-generic/mutex-dec.h>
-#endif
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index f9a09942a32d..5addd69f70ef 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -97,7 +97,6 @@ extern struct sh_cpuinfo cpu_data[];
#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
void default_idle(void);
void stop_this_cpu(void *);
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index cfc918067f80..0569bfac4afb 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -15,7 +15,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += module.h
-generic-y += mutex.h
generic-y += preempt.h
generic-y += rwsem.h
generic-y += serial.h
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 812fd08f3e62..365d4cb267b4 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -119,7 +119,6 @@ extern struct task_struct *last_task_used_math;
int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
extern void (*sparc_idle)(void);
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h
index ce2595c89471..6448cfc8292f 100644
--- a/arch/sparc/include/asm/processor_64.h
+++ b/arch/sparc/include/asm/processor_64.h
@@ -216,7 +216,6 @@ unsigned long get_wchan(struct task_struct *task);
"nop\n\t" \
".previous" \
::: "memory")
-#define cpu_relax_lowlatency() cpu_relax()
/* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index fa8e21abb5e0..4808b6d23455 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -221,7 +221,7 @@ static struct device_attribute cpu_core_attrs[] = {
static DEFINE_PER_CPU(struct cpu, cpu_devices);
-static void register_cpu_online(unsigned int cpu)
+static int register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
@@ -231,11 +231,12 @@ static void register_cpu_online(unsigned int cpu)
device_create_file(s, &cpu_core_attrs[i]);
register_mmu_stats(s);
+ return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-static void unregister_cpu_online(unsigned int cpu)
+static int unregister_cpu_online(unsigned int cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct device *s = &c->dev;
int i;
@@ -243,33 +244,10 @@ static void unregister_cpu_online(unsigned int cpu)
unregister_mmu_stats(s);
for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++)
device_remove_file(s, &cpu_core_attrs[i]);
-}
-#endif
-
-static int sysfs_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned int)(long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- register_cpu_online(cpu);
- break;
-#ifdef CONFIG_HOTPLUG_CPU
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- unregister_cpu_online(cpu);
- break;
#endif
- }
- return NOTIFY_OK;
+ return 0;
}
-static struct notifier_block sysfs_cpu_nb = {
- .notifier_call = sysfs_cpu_notify,
-};
-
static void __init check_mmu_stats(void)
{
unsigned long dummy1, err;
@@ -294,26 +272,21 @@ static void register_nodes(void)
static int __init topology_init(void)
{
- int cpu;
+ int cpu, ret;
register_nodes();
check_mmu_stats();
- cpu_notifier_register_begin();
-
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
register_cpu(c, cpu);
- if (cpu_online(cpu))
- register_cpu_online(cpu);
}
- __register_cpu_notifier(&sysfs_cpu_nb);
-
- cpu_notifier_register_done();
-
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "sparc/topology:online",
+ register_cpu_online, unregister_cpu_online);
+ WARN_ON(ret < 0);
return 0;
}
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index ba35c41c71ff..2d1f5638974c 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -21,7 +21,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msgbuf.h
-generic-y += mutex.h
generic-y += param.h
generic-y += parport.h
generic-y += poll.h
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 0684e88aacd8..0bc9968b97a1 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -264,8 +264,6 @@ static inline void cpu_relax(void)
barrier();
}
-#define cpu_relax_lowlatency() cpu_relax()
-
/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 904f3ebf4220..052f7f6d0551 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -17,7 +17,6 @@ generic-y += irq_work.h
generic-y += kdebug.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
-generic-y += mutex.h
generic-y += param.h
generic-y += pci.h
generic-y += percpu.h
diff --git a/arch/unicore32/include/asm/mutex.h b/arch/unicore32/include/asm/mutex.h
deleted file mode 100644
index fab7d0e8adf6..000000000000
--- a/arch/unicore32/include/asm/mutex.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * linux/arch/unicore32/include/asm/mutex.h
- *
- * Code specific to PKUnity SoC and UniCore ISA
- *
- * Copyright (C) 2001-2010 GUAN Xue-tao
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * UniCore optimized mutex locking primitives
- *
- * Please look into asm-generic/mutex-xchg.h for a formal definition.
- */
-#ifndef __UNICORE_MUTEX_H__
-#define __UNICORE_MUTEX_H__
-
-# include <asm-generic/mutex-xchg.h>
-#endif
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h
index 8d21b7adf26b..4eaa42167667 100644
--- a/arch/unicore32/include/asm/processor.h
+++ b/arch/unicore32/include/asm/processor.h
@@ -71,7 +71,6 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 19d237b0737d..215612cfb955 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -9,28 +9,50 @@ config 64BIT
config X86_32
def_bool y
depends on !64BIT
+ # Options that are inherently 32-bit kernel only:
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select CLKSRC_I8253
+ select CLONE_BACKWARDS
+ select HAVE_AOUT
+ select HAVE_GENERIC_DMA_COHERENT
+ select MODULES_USE_ELF_REL
+ select OLD_SIGACTION
config X86_64
def_bool y
depends on 64BIT
+ # Options that are inherently 64-bit kernel only:
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_SUPPORTS_INT128
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select HAVE_ARCH_SOFT_DIRTY
+ select MODULES_USE_ELF_RELA
+ select X86_DEV_DMA_OPS
-### Arch settings
+#
+# Arch settings
+#
+# ( Note that options that are marked 'if X86_64' could in principle be
+# ported to 32-bit as well. )
+#
config X86
def_bool y
+ #
+ # Note: keep this list sorted alphabetically
+ #
select ACPI_LEGACY_TABLES_LOOKUP if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ANON_INODES
select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK
- select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_GIGANTIC_PAGE if X86_64
select ARCH_HAS_KCOV if X86_64
- select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_MMIO_FLUSH
+ select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -39,23 +61,17 @@ config X86
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
- select ARCH_SUPPORTS_INT128 if X86_64
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_USE_BUILTIN_BSWAP
- select ARCH_USE_CMPXCHG_LOCKREF if X86_64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
- select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_FRAME_POINTERS
- select ARCH_WANT_IPC_PARSE_VERSION if X86_32
+ select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select BUILDTIME_EXTABLE_SORT
select CLKEVT_I8253
- select CLKSRC_I8253 if X86_32
select CLOCKSOURCE_VALIDATE_LAST_CYCLE
select CLOCKSOURCE_WATCHDOG
- select CLONE_BACKWARDS if X86_32
- select COMPAT_OLD_SIGACTION if IA32_EMULATION
select DCACHE_WORD_ACCESS
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
@@ -77,7 +93,6 @@ config X86
select HAVE_ACPI_APEI if ACPI
select HAVE_ACPI_APEI_NMI if ACPI
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
- select HAVE_AOUT if X86_32
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
@@ -88,12 +103,10 @@ config X86
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_SECCOMP_FILTER
- select HAVE_ARCH_SOFT_DIRTY if X86_64
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
- select HAVE_ARCH_WITHIN_STACK_FRAMES
- select HAVE_EBPF_JIT if X86_64
select HAVE_ARCH_VMAP_STACK if X86_64
+ select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
@@ -106,6 +119,7 @@ config X86
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_EBPF_JIT if X86_64
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EXIT_THREAD
select HAVE_FENTRY if X86_64
@@ -113,7 +127,6 @@ config X86
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS
- select HAVE_GENERIC_DMA_COHERENT if X86_32
select HAVE_HW_BREAKPOINT
select HAVE_IDE
select HAVE_IOREMAP_PROT
@@ -142,15 +155,11 @@ config X86
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_STACK_VALIDATION if X86_64
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_UID16 if X86_32 || IA32_EMULATION
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_USER_RETURN_NOTIFIER
select IRQ_FORCED_THREADING
- select MODULES_USE_ELF_RELA if X86_64
- select MODULES_USE_ELF_REL if X86_32
- select OLD_SIGACTION if X86_32
- select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
select PERF_EVENTS
select RTC_LIB
select RTC_MC146818_LIB
@@ -160,11 +169,7 @@ config X86
select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
- select X86_DEV_DMA_OPS if X86_64
select X86_FEATURE_NAMES if PROC_FS
- select HAVE_STACK_VALIDATION if X86_64
- select ARCH_USES_HIGH_VMA_FLAGS if X86_INTEL_MEMORY_PROTECTION_KEYS
- select ARCH_HAS_PKEYS if X86_INTEL_MEMORY_PROTECTION_KEYS
config INSTRUCTION_DECODER
def_bool y
@@ -407,6 +412,19 @@ config GOLDFISH
def_bool y
depends on X86_GOLDFISH
+config INTEL_RDT_A
+ bool "Intel Resource Director Technology Allocation support"
+ default n
+ depends on X86 && CPU_SUP_INTEL
+ select KERNFS
+ help
+ Select to enable resource allocation which is a sub-feature of
+ Intel Resource Director Technology(RDT). More information about
+ RDT can be found in the Intel x86 Architecture Software
+ Developer Manual.
+
+ Say N if unsure.
+
if X86_32
config X86_EXTENDED_PLATFORM
bool "Support for extended (non-PC) x86 platforms"
@@ -939,6 +957,27 @@ config SCHED_MC
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
+config SCHED_MC_PRIO
+ bool "CPU core priorities scheduler support"
+ depends on SCHED_MC && CPU_SUP_INTEL
+ select X86_INTEL_PSTATE
+ select CPU_FREQ
+ default y
+ ---help---
+ Intel Turbo Boost Max Technology 3.0 enabled CPUs have a
+ core ordering determined at manufacturing time, which allows
+ certain cores to reach higher turbo frequencies (when running
+ single threaded workloads) than others.
+
+ Enabling this kernel feature teaches the scheduler about
+ the TBM3 (aka ITMT) priority order of the CPU cores and adjusts the
+ scheduler's CPU selection logic accordingly, so that higher
+ overall system performance can be achieved.
+
+ This option has no effect on CPUs that do not support ITMT.
+
+ If unsure say Y here.
+
source "kernel/Kconfig.preempt"
config UP_LATE_INIT
@@ -1025,7 +1064,7 @@ config X86_MCE_INTEL
config X86_MCE_AMD
def_bool y
prompt "AMD MCE features"
- depends on X86_MCE && X86_LOCAL_APIC
+ depends on X86_MCE && X86_LOCAL_APIC && AMD_NB
---help---
Additional support for AMD specific MCE features such as
the DRAM Error Threshold.
@@ -1737,6 +1776,8 @@ config X86_INTEL_MEMORY_PROTECTION_KEYS
def_bool y
# Note: only available in 64-bit mode
depends on CPU_SUP_INTEL && X86_64
+ select ARCH_USES_HIGH_VMA_FLAGS
+ select ARCH_HAS_PKEYS
---help---
Memory Protection Keys provides a mechanism for enforcing
page-based protections, but without requiring modification of the
@@ -2092,7 +2133,7 @@ config DEBUG_HOTPLUG_CPU0
config COMPAT_VDSO
def_bool n
prompt "Disable the 32-bit vDSO (needed for glibc 2.3.3)"
- depends on X86_32 || IA32_EMULATION
+ depends on COMPAT_32
---help---
Certain buggy versions of glibc will crash if they are
presented with a 32-bit vDSO that is not mapped at the address
@@ -2694,9 +2735,10 @@ source "fs/Kconfig.binfmt"
config IA32_EMULATION
bool "IA32 Emulation"
depends on X86_64
+ select ARCH_WANT_OLD_COMPAT_IPC
select BINFMT_ELF
select COMPAT_BINFMT_ELF
- select ARCH_WANT_OLD_COMPAT_IPC
+ select COMPAT_OLD_SIGACTION
---help---
Include code to run legacy 32-bit programs under a
64-bit kernel. You should likely turn this on, unless you're
@@ -2721,6 +2763,12 @@ config X86_X32
elf32_x86_64 support enabled to compile a kernel with this
option set.
+config COMPAT_32
+ def_bool y
+ depends on IA32_EMULATION || X86_32
+ select HAVE_UID16
+ select OLD_SIGSUSPEND3
+
config COMPAT
def_bool y
depends on IA32_EMULATION || X86_X32
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 12ea8f8384f4..0d810fb15eac 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -65,7 +65,7 @@ clean-files += cpustr.h
# ---------------------------------------------------------------------------
-KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
+KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
UBSAN_SANITIZE := n
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index cc69e37548db..ff01c8fc76f7 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -537,6 +537,69 @@ free_handle:
efi_call_early(free_pool, pci_handle);
}
+static void retrieve_apple_device_properties(struct boot_params *boot_params)
+{
+ efi_guid_t guid = APPLE_PROPERTIES_PROTOCOL_GUID;
+ struct setup_data *data, *new;
+ efi_status_t status;
+ u32 size = 0;
+ void *p;
+
+ status = efi_call_early(locate_protocol, &guid, NULL, &p);
+ if (status != EFI_SUCCESS)
+ return;
+
+ if (efi_table_attr(apple_properties_protocol, version, p) != 0x10000) {
+ efi_printk(sys_table, "Unsupported properties proto version\n");
+ return;
+ }
+
+ efi_call_proto(apple_properties_protocol, get_all, p, NULL, &size);
+ if (!size)
+ return;
+
+ do {
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+ size + sizeof(struct setup_data), &new);
+ if (status != EFI_SUCCESS) {
+ efi_printk(sys_table,
+ "Failed to alloc mem for properties\n");
+ return;
+ }
+
+ status = efi_call_proto(apple_properties_protocol, get_all, p,
+ new->data, &size);
+
+ if (status == EFI_BUFFER_TOO_SMALL)
+ efi_call_early(free_pool, new);
+ } while (status == EFI_BUFFER_TOO_SMALL);
+
+ new->type = SETUP_APPLE_PROPERTIES;
+ new->len = size;
+ new->next = 0;
+
+ data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
+ if (!data)
+ boot_params->hdr.setup_data = (unsigned long)new;
+ else {
+ while (data->next)
+ data = (struct setup_data *)(unsigned long)data->next;
+ data->next = (unsigned long)new;
+ }
+}
+
+static void setup_quirks(struct boot_params *boot_params)
+{
+ efi_char16_t const apple[] = { 'A', 'p', 'p', 'l', 'e', 0 };
+ efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long)
+ efi_table_attr(efi_system_table, fw_vendor, sys_table);
+
+ if (!memcmp(fw_vendor, apple, sizeof(apple))) {
+ if (IS_ENABLED(CONFIG_APPLE_PROPERTIES))
+ retrieve_apple_device_properties(boot_params);
+ }
+}
+
static efi_status_t
setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
{
@@ -1098,6 +1161,8 @@ struct boot_params *efi_main(struct efi_config *c,
setup_efi_pci(boot_params);
+ setup_quirks(boot_params);
+
status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
sizeof(*gdt), (void **)&gdt);
if (status != EFI_SUCCESS) {
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index efdfba21a5b2..4d85e600db78 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -119,8 +119,7 @@ ENTRY(startup_32)
*/
/* Load new GDT with the 64bit segments using 32bit descriptor */
- leal gdt(%ebp), %eax
- movl %eax, gdt+2(%ebp)
+ addl %ebp, gdt+2(%ebp)
lgdt gdt(%ebp)
/* Enable PAE mode */
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 0857b1a1de3b..c194d5717ae5 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -48,26 +48,13 @@
#ifdef CONFIG_X86_64
/*
* use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
* for fpu state save/restore overhead.
*/
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024
+#define CRC32C_PCL_BREAKEVEN 512
asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#if defined(X86_FEATURE_EAGER_FPU)
-#define set_pcl_breakeven_point() \
-do { \
- if (!use_eager_fpu()) \
- crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
-} while (0)
-#else
-#define set_pcl_breakeven_point() \
- (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU)
-#endif
#endif /* CONFIG_X86_64 */
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -190,7 +177,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
* use faster PCL version if datasize is large enough to
* overcome kernel fpu state save/restore overhead
*/
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*crcp = crc_pcl(data, len, *crcp);
kernel_fpu_end();
@@ -202,7 +189,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
kernel_fpu_end();
@@ -261,7 +248,6 @@ static int __init crc32c_intel_mod_init(void)
alg.update = crc32c_pcl_intel_update;
alg.finup = crc32c_pcl_intel_finup;
alg.digest = crc32c_pcl_intel_digest;
- set_pcl_breakeven_point();
}
#endif
return crypto_register_shash(&alg);
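
With the lazy-FPU mode apparently gone from x86, only one break-even buffer size is left, so the runtime-chosen threshold collapses into the CRC32C_PCL_BREAKEVEN constant; the point of the threshold is that the PCLMULQDQ path has to amortise the kernel_fpu_begin()/kernel_fpu_end() state switch. The resulting fast path keeps the usual "borrow the FPU in kernel mode" shape, roughly (crc32c_intel_le_hw() is assumed to be the scalar helper from the same file, outside this hunk):

  /* sketch: only take the PCLMULQDQ path when it amortises the FPU switch */
  static u32 crc32c_update_sketch(u32 crc, const u8 *data, unsigned int len)
  {
          if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
                  kernel_fpu_begin();
                  crc = crc_pcl(data, len, crc);
                  kernel_fpu_end();
          } else {
                  crc = crc32c_intel_le_hw(crc, data, len);
          }
          return crc;
  }
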
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 9a9e5884066c..05ed3d393da7 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -90,8 +90,8 @@ For 32-bit we have the following conventions - kernel is built with
#define SIZEOF_PTREGS 21*8
- .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
- addq $-(15*8+\addskip), %rsp
+ .macro ALLOC_PT_GPREGS_ON_STACK
+ addq $-(15*8), %rsp
.endm
.macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
@@ -147,15 +147,6 @@ For 32-bit we have the following conventions - kernel is built with
movq 5*8+\offset(%rsp), %rbx
.endm
- .macro ZERO_EXTRA_REGS
- xorl %r15d, %r15d
- xorl %r14d, %r14d
- xorl %r13d, %r13d
- xorl %r12d, %r12d
- xorl %ebp, %ebp
- xorl %ebx, %ebx
- .endm
-
.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
.if \rstor_r11
movq 6*8(%rsp), %r11
@@ -201,6 +192,26 @@ For 32-bit we have the following conventions - kernel is built with
.byte 0xf1
.endm
+/*
+ * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
+ * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
+ * is just setting the LSB, which makes it an invalid stack address and is also
+ * a signal to the unwinder that it's a pt_regs pointer in disguise.
+ *
+ * NOTE: This macro must be used *after* SAVE_EXTRA_REGS because it corrupts
+ * the original rbp.
+ */
+.macro ENCODE_FRAME_POINTER ptregs_offset=0
+#ifdef CONFIG_FRAME_POINTER
+ .if \ptregs_offset
+ leaq \ptregs_offset(%rsp), %rbp
+ .else
+ mov %rsp, %rbp
+ .endif
+ orq $0x1, %rbp
+#endif
+.endm
+
#endif /* CONFIG_X86_64 */
/*
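
ENCODE_FRAME_POINTER implements the trick the comment describes: instead of a saved frame pointer, RBP is loaded with the address of the just-saved pt_regs with bit 0 set, which can never be a valid, aligned frame address. A consumer such as the frame-pointer unwinder can then recognise and strip the tag; purely as an illustration (these helper names are invented, not the unwinder's real ones):

  /* sketch: detect and decode an RBP that really points at pt_regs */
  static inline bool bp_is_encoded_ptregs(unsigned long bp)
  {
          return bp & 0x1;
  }

  static inline struct pt_regs *bp_decode_ptregs(unsigned long bp)
  {
          return (struct pt_regs *)(bp & ~0x1UL);
  }
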
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 21b352a11b49..acc0c6f36f3f 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -45,6 +45,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
+#include <asm/frame.h>
.section .entry.text, "ax"
@@ -175,6 +176,22 @@
SET_KERNEL_GS %edx
.endm
+/*
+ * This is a sneaky trick to help the unwinder find pt_regs on the stack. The
+ * frame pointer is replaced with an encoded pointer to pt_regs. The encoding
+ * is just setting the LSB, which makes it an invalid stack address and is also
+ * a signal to the unwinder that it's a pt_regs pointer in disguise.
+ *
+ * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
+ * original rbp.
+ */
+.macro ENCODE_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
+ mov %esp, %ebp
+ orl $0x1, %ebp
+#endif
+.endm
+
.macro RESTORE_INT_REGS
popl %ebx
popl %ecx
@@ -238,6 +255,23 @@ ENTRY(__switch_to_asm)
END(__switch_to_asm)
/*
+ * The unwinder expects the last frame on the stack to always be at the same
+ * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
+ * asmlinkage function so its argument has to be pushed on the stack. This
+ * wrapper creates a proper "end of stack" frame header before the call.
+ */
+ENTRY(schedule_tail_wrapper)
+ FRAME_BEGIN
+
+ pushl %eax
+ call schedule_tail
+ popl %eax
+
+ FRAME_END
+ ret
+ENDPROC(schedule_tail_wrapper)
+/*
* A newly forked process directly context switches into this address.
*
* eax: prev task we switched from
@@ -245,9 +279,7 @@ END(__switch_to_asm)
* edi: kernel thread arg
*/
ENTRY(ret_from_fork)
- pushl %eax
- call schedule_tail
- popl %eax
+ call schedule_tail_wrapper
testl %ebx, %ebx
jnz 1f /* kernel threads are uncommon */
@@ -307,13 +339,13 @@ END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
-need_resched:
+.Lneed_resched:
cmpl $0, PER_CPU_VAR(__preempt_count)
jnz restore_all
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
call preempt_schedule_irq
- jmp need_resched
+ jmp .Lneed_resched
END(resume_kernel)
#endif
@@ -334,7 +366,7 @@ GLOBAL(__begin_SYSENTER_singlestep_region)
*/
ENTRY(xen_sysenter_target)
addl $5*4, %esp /* remove xen-provided frame */
- jmp sysenter_past_esp
+ jmp .Lsysenter_past_esp
#endif
/*
@@ -371,7 +403,7 @@ ENTRY(xen_sysenter_target)
*/
ENTRY(entry_SYSENTER_32)
movl TSS_sysenter_sp0(%esp), %esp
-sysenter_past_esp:
+.Lsysenter_past_esp:
pushl $__USER_DS /* pt_regs->ss */
pushl %ebp /* pt_regs->sp (stashed in bp) */
pushfl /* pt_regs->flags (except IF = 0) */
@@ -504,9 +536,9 @@ ENTRY(entry_INT80_32)
restore_all:
TRACE_IRQS_IRET
-restore_all_notrace:
+.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
- ALTERNATIVE "jmp restore_nocheck", "", X86_BUG_ESPFIX
+ ALTERNATIVE "jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
/*
@@ -518,22 +550,23 @@ restore_all_notrace:
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
- je ldt_ss # returning to user-space with LDT SS
+ je .Lldt_ss # returning to user-space with LDT SS
#endif
-restore_nocheck:
+.Lrestore_nocheck:
RESTORE_REGS 4 # skip orig_eax/error_code
-irq_return:
+.Lirq_return:
INTERRUPT_RETURN
+
.section .fixup, "ax"
ENTRY(iret_exc )
pushl $0 # no error code
pushl $do_iret_error
- jmp error_code
+ jmp common_exception
.previous
- _ASM_EXTABLE(irq_return, iret_exc)
+ _ASM_EXTABLE(.Lirq_return, iret_exc)
#ifdef CONFIG_X86_ESPFIX32
-ldt_ss:
+.Lldt_ss:
/*
* Setup and switch to ESPFIX stack
*
@@ -562,7 +595,7 @@ ldt_ss:
*/
DISABLE_INTERRUPTS(CLBR_EAX)
lss (%esp), %esp /* switch to espfix segment */
- jmp restore_nocheck
+ jmp .Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)
@@ -624,6 +657,7 @@ common_interrupt:
ASM_CLAC
addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */
SAVE_ALL
+ ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
movl %esp, %eax
call do_IRQ
@@ -635,6 +669,7 @@ ENTRY(name) \
ASM_CLAC; \
pushl $~(nr); \
SAVE_ALL; \
+ ENCODE_FRAME_POINTER; \
TRACE_IRQS_OFF \
movl %esp, %eax; \
call fn; \
@@ -659,7 +694,7 @@ ENTRY(coprocessor_error)
ASM_CLAC
pushl $0
pushl $do_coprocessor_error
- jmp error_code
+ jmp common_exception
END(coprocessor_error)
ENTRY(simd_coprocessor_error)
@@ -673,14 +708,14 @@ ENTRY(simd_coprocessor_error)
#else
pushl $do_simd_coprocessor_error
#endif
- jmp error_code
+ jmp common_exception
END(simd_coprocessor_error)
ENTRY(device_not_available)
ASM_CLAC
pushl $-1 # mark this as an int
pushl $do_device_not_available
- jmp error_code
+ jmp common_exception
END(device_not_available)
#ifdef CONFIG_PARAVIRT
@@ -694,59 +729,59 @@ ENTRY(overflow)
ASM_CLAC
pushl $0
pushl $do_overflow
- jmp error_code
+ jmp common_exception
END(overflow)
ENTRY(bounds)
ASM_CLAC
pushl $0
pushl $do_bounds
- jmp error_code
+ jmp common_exception
END(bounds)
ENTRY(invalid_op)
ASM_CLAC
pushl $0
pushl $do_invalid_op
- jmp error_code
+ jmp common_exception
END(invalid_op)
ENTRY(coprocessor_segment_overrun)
ASM_CLAC
pushl $0
pushl $do_coprocessor_segment_overrun
- jmp error_code
+ jmp common_exception
END(coprocessor_segment_overrun)
ENTRY(invalid_TSS)
ASM_CLAC
pushl $do_invalid_TSS
- jmp error_code
+ jmp common_exception
END(invalid_TSS)
ENTRY(segment_not_present)
ASM_CLAC
pushl $do_segment_not_present
- jmp error_code
+ jmp common_exception
END(segment_not_present)
ENTRY(stack_segment)
ASM_CLAC
pushl $do_stack_segment
- jmp error_code
+ jmp common_exception
END(stack_segment)
ENTRY(alignment_check)
ASM_CLAC
pushl $do_alignment_check
- jmp error_code
+ jmp common_exception
END(alignment_check)
ENTRY(divide_error)
ASM_CLAC
pushl $0 # no error code
pushl $do_divide_error
- jmp error_code
+ jmp common_exception
END(divide_error)
#ifdef CONFIG_X86_MCE
@@ -754,7 +789,7 @@ ENTRY(machine_check)
ASM_CLAC
pushl $0
pushl machine_check_vector
- jmp error_code
+ jmp common_exception
END(machine_check)
#endif
@@ -762,13 +797,14 @@ ENTRY(spurious_interrupt_bug)
ASM_CLAC
pushl $0
pushl $do_spurious_interrupt_bug
- jmp error_code
+ jmp common_exception
END(spurious_interrupt_bug)
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
+ ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
/*
@@ -823,6 +859,7 @@ ENTRY(xen_failsafe_callback)
jmp iret_exc
5: pushl $-1 /* orig_ax = -1 => not a system call */
SAVE_ALL
+ ENCODE_FRAME_POINTER
jmp ret_from_exception
.section .fixup, "ax"
@@ -882,7 +919,7 @@ ftrace_call:
popl %edx
popl %ecx
popl %eax
-ftrace_ret:
+.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
@@ -952,7 +989,7 @@ GLOBAL(ftrace_regs_call)
popl %gs
addl $8, %esp /* Skip orig_ax and ip */
popf /* Pop flags at end (no addl to corrupt flags) */
- jmp ftrace_ret
+ jmp .Lftrace_ret
popf
jmp ftrace_stub
@@ -963,7 +1000,7 @@ ENTRY(mcount)
jb ftrace_stub /* Paging not enabled yet? */
cmpl $ftrace_stub, ftrace_trace_function
- jnz trace
+ jnz .Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpl $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller
@@ -976,7 +1013,7 @@ ftrace_stub:
ret
/* taken from glibc */
-trace:
+.Ltrace:
pushl %eax
pushl %ecx
pushl %edx
@@ -1027,7 +1064,7 @@ return_to_handler:
ENTRY(trace_page_fault)
ASM_CLAC
pushl $trace_do_page_fault
- jmp error_code
+ jmp common_exception
END(trace_page_fault)
#endif
@@ -1035,7 +1072,10 @@ ENTRY(page_fault)
ASM_CLAC
pushl $do_page_fault
ALIGN
-error_code:
+ jmp common_exception
+END(page_fault)
+
+common_exception:
/* the function address is in %gs's slot on the stack */
pushl %fs
pushl %es
@@ -1047,6 +1087,7 @@ error_code:
pushl %edx
pushl %ecx
pushl %ebx
+ ENCODE_FRAME_POINTER
cld
movl $(__KERNEL_PERCPU), %ecx
movl %ecx, %fs
@@ -1064,7 +1105,7 @@ error_code:
movl %esp, %eax # pt_regs pointer
call *%edi
jmp ret_from_exception
-END(page_fault)
+END(common_exception)
ENTRY(debug)
/*
@@ -1079,6 +1120,7 @@ ENTRY(debug)
ASM_CLAC
pushl $-1 # mark this as an int
SAVE_ALL
+ ENCODE_FRAME_POINTER
xorl %edx, %edx # error code 0
movl %esp, %eax # pt_regs pointer
@@ -1094,11 +1136,11 @@ ENTRY(debug)
.Ldebug_from_sysenter_stack:
/* We're on the SYSENTER stack. Switch off. */
- movl %esp, %ebp
+ movl %esp, %ebx
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
TRACE_IRQS_OFF
call do_debug
- movl %ebp, %esp
+ movl %ebx, %esp
jmp ret_from_exception
END(debug)
@@ -1116,11 +1158,12 @@ ENTRY(nmi)
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
popl %eax
- je nmi_espfix_stack
+ je .Lnmi_espfix_stack
#endif
pushl %eax # pt_regs->orig_ax
SAVE_ALL
+ ENCODE_FRAME_POINTER
xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
@@ -1132,21 +1175,21 @@ ENTRY(nmi)
/* Not on SYSENTER stack. */
call do_nmi
- jmp restore_all_notrace
+ jmp .Lrestore_all_notrace
.Lnmi_from_sysenter_stack:
/*
* We're on the SYSENTER stack. Switch off. No one (not even debug)
* is using the thread stack right now, so it's safe for us to use it.
*/
- movl %esp, %ebp
+ movl %esp, %ebx
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
call do_nmi
- movl %ebp, %esp
- jmp restore_all_notrace
+ movl %ebx, %esp
+ jmp .Lrestore_all_notrace
#ifdef CONFIG_X86_ESPFIX32
-nmi_espfix_stack:
+.Lnmi_espfix_stack:
/*
* create the pointer to lss back
*/
@@ -1159,12 +1202,13 @@ nmi_espfix_stack:
.endr
pushl %eax
SAVE_ALL
+ ENCODE_FRAME_POINTER
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx, %edx # zero error code
call do_nmi
RESTORE_REGS
lss 12+4(%esp), %esp # back to espfix stack
- jmp irq_return
+ jmp .Lirq_return
#endif
END(nmi)
@@ -1172,6 +1216,7 @@ ENTRY(int3)
ASM_CLAC
pushl $-1 # mark this as an int
SAVE_ALL
+ ENCODE_FRAME_POINTER
TRACE_IRQS_OFF
xorl %edx, %edx # zero error code
movl %esp, %eax # pt_regs pointer
@@ -1181,14 +1226,14 @@ END(int3)
ENTRY(general_protection)
pushl $do_general_protection
- jmp error_code
+ jmp common_exception
END(general_protection)
#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
ASM_CLAC
pushl $do_async_page_fault
- jmp error_code
+ jmp common_exception
END(async_page_fault)
#endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index ef766a358b37..5b219707c2f2 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -38,12 +38,6 @@
#include <asm/export.h>
#include <linux/err.h>
-/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
-#include <linux/elf-em.h>
-#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
-#define __AUDIT_ARCH_64BIT 0x80000000
-#define __AUDIT_ARCH_LE 0x40000000
-
.code64
.section .entry.text, "ax"
@@ -469,6 +463,7 @@ END(irq_entries_start)
ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS
SAVE_EXTRA_REGS
+ ENCODE_FRAME_POINTER
testb $3, CS(%rsp)
jz 1f
@@ -985,6 +980,7 @@ ENTRY(xen_failsafe_callback)
ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS
SAVE_EXTRA_REGS
+ ENCODE_FRAME_POINTER
jmp error_exit
END(xen_failsafe_callback)
@@ -1028,6 +1024,7 @@ ENTRY(paranoid_entry)
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
+ ENCODE_FRAME_POINTER 8
movl $1, %ebx
movl $MSR_GS_BASE, %ecx
rdmsr
@@ -1075,6 +1072,7 @@ ENTRY(error_entry)
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
+ ENCODE_FRAME_POINTER 8
xorl %ebx, %ebx
testb $3, CS+8(%rsp)
jz .Lerror_kernelspace
@@ -1257,6 +1255,7 @@ ENTRY(nmi)
pushq %r13 /* pt_regs->r13 */
pushq %r14 /* pt_regs->r14 */
pushq %r15 /* pt_regs->r15 */
+ ENCODE_FRAME_POINTER
/*
* At this point we no longer need to worry about stack damage
@@ -1270,11 +1269,10 @@ ENTRY(nmi)
/*
* Return back to user mode. We must *not* do the normal exit
- * work, because we don't want to enable interrupts. Fortunately,
- * do_nmi doesn't modify pt_regs.
+ * work, because we don't want to enable interrupts.
*/
SWAPGS
- jmp restore_c_regs_and_iret
+ jmp restore_regs_and_iret
.Lnmi_from_kernel:
/*
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 23c881caabd1..e739002427ed 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -161,8 +161,6 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
}
text_start = addr - image->sym_vvar_start;
- current->mm->context.vdso = (void __user *)text_start;
- current->mm->context.vdso_image = image;
/*
* MAYWRITE to allow gdb to COW and set breakpoints
@@ -189,14 +187,12 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
do_munmap(mm, text_start, image->size);
+ } else {
+ current->mm->context.vdso = (void __user *)text_start;
+ current->mm->context.vdso_image = image;
}
up_fail:
- if (ret) {
- current->mm->context.vdso = NULL;
- current->mm->context.vdso_image = NULL;
- }
-
up_write(&mm->mmap_sem);
return ret;
}
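The map_vdso() change above is a small error-handling refactor: the vdso and vdso_image pointers in the mm context are now set only after the mapping has been installed successfully, so the failure path no longer has to reset them. A hedged sketch of the resulting pattern (types and names are illustrative, not the kernel's):

struct example_ctx {
	void *vdso;			/* published address, or NULL */
};

/* Publish state only once the operation backing it has succeeded, so the
 * error path has nothing to undo (illustrative sketch). */
static int example_publish(struct example_ctx *ctx, void *text_start, int ret)
{
	if (ret == 0)
		ctx->vdso = text_start;
	return ret;
}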
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 8f82b02934fa..0c45cc8e64ba 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -7,9 +7,9 @@
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
+#include <asm/intel_rdt_common.h>
#include "../perf_event.h"
-#define MSR_IA32_PQR_ASSOC 0x0c8f
#define MSR_IA32_QM_CTR 0x0c8e
#define MSR_IA32_QM_EVTSEL 0x0c8d
@@ -24,32 +24,13 @@ static unsigned int cqm_l3_scale; /* supposedly cacheline size */
static bool cqm_enabled, mbm_enabled;
unsigned int mbm_socket_max;
-/**
- * struct intel_pqr_state - State cache for the PQR MSR
- * @rmid: The cached Resource Monitoring ID
- * @closid: The cached Class Of Service ID
- * @rmid_usecnt: The usage counter for rmid
- *
- * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
- * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
- * contains both parts, so we need to cache them.
- *
- * The cache also helps to avoid pointless updates if the value does
- * not change.
- */
-struct intel_pqr_state {
- u32 rmid;
- u32 closid;
- int rmid_usecnt;
-};
-
/*
* The cached intel_pqr_state is strictly per CPU and can never be
* updated from a remote CPU. Both functions which modify the state
* (intel_cqm_event_start and intel_cqm_event_stop) are called with
* interrupts disabled, which is sufficient for the protection.
*/
-static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
static struct hrtimer *mbm_timers;
/**
* struct sample - mbm event's (local or total) data
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index c5047b8f777b..1c1b9fe705c8 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -36,13 +36,6 @@ static DEFINE_PER_CPU(struct pt, pt_ctx);
static struct pt_pmu pt_pmu;
-enum cpuid_regs {
- CR_EAX = 0,
- CR_ECX,
- CR_EDX,
- CR_EBX
-};
-
/*
* Capabilities of Intel PT hardware, such as number of address bits or
* supported output schemes, are cached and exported to userspace as "caps"
@@ -64,21 +57,21 @@ static struct pt_cap_desc {
u8 reg;
u32 mask;
} pt_caps[] = {
- PT_CAP(max_subleaf, 0, CR_EAX, 0xffffffff),
- PT_CAP(cr3_filtering, 0, CR_EBX, BIT(0)),
- PT_CAP(psb_cyc, 0, CR_EBX, BIT(1)),
- PT_CAP(ip_filtering, 0, CR_EBX, BIT(2)),
- PT_CAP(mtc, 0, CR_EBX, BIT(3)),
- PT_CAP(ptwrite, 0, CR_EBX, BIT(4)),
- PT_CAP(power_event_trace, 0, CR_EBX, BIT(5)),
- PT_CAP(topa_output, 0, CR_ECX, BIT(0)),
- PT_CAP(topa_multiple_entries, 0, CR_ECX, BIT(1)),
- PT_CAP(single_range_output, 0, CR_ECX, BIT(2)),
- PT_CAP(payloads_lip, 0, CR_ECX, BIT(31)),
- PT_CAP(num_address_ranges, 1, CR_EAX, 0x3),
- PT_CAP(mtc_periods, 1, CR_EAX, 0xffff0000),
- PT_CAP(cycle_thresholds, 1, CR_EBX, 0xffff),
- PT_CAP(psb_periods, 1, CR_EBX, 0xffff0000),
+ PT_CAP(max_subleaf, 0, CPUID_EAX, 0xffffffff),
+ PT_CAP(cr3_filtering, 0, CPUID_EBX, BIT(0)),
+ PT_CAP(psb_cyc, 0, CPUID_EBX, BIT(1)),
+ PT_CAP(ip_filtering, 0, CPUID_EBX, BIT(2)),
+ PT_CAP(mtc, 0, CPUID_EBX, BIT(3)),
+ PT_CAP(ptwrite, 0, CPUID_EBX, BIT(4)),
+ PT_CAP(power_event_trace, 0, CPUID_EBX, BIT(5)),
+ PT_CAP(topa_output, 0, CPUID_ECX, BIT(0)),
+ PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)),
+ PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)),
+ PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)),
+ PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3),
+ PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000),
+ PT_CAP(cycle_thresholds, 1, CPUID_EBX, 0xffff),
+ PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
};
static u32 pt_cap_get(enum pt_capabilities cap)
@@ -213,10 +206,10 @@ static int __init pt_pmu_hw_init(void)
for (i = 0; i < PT_CPUID_LEAVES; i++) {
cpuid_count(20, i,
- &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
- &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
- &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
- &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
+ &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
+ &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
+ &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
+ &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
}
ret = -ENOMEM;
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index 2cfed174e3c9..2b892e2313a9 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -6,10 +6,6 @@ generated-y += unistd_32_ia32.h
generated-y += unistd_64_x32.h
generated-y += xen-hypercalls.h
-genhdr-y += unistd_32.h
-genhdr-y += unistd_64.h
-genhdr-y += unistd_x32.h
-
generic-y += clkdev.h
generic-y += cputime.h
generic-y += dma-contiguous.h
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 5e828da2e18f..00c88a01301d 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -21,6 +21,10 @@ extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
extern int amd_set_subcaches(int, unsigned long);
+extern int amd_smn_read(u16 node, u32 address, u32 *value);
+extern int amd_smn_write(u16 node, u32 address, u32 value);
+extern int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo);
+
struct amd_l3_cache {
unsigned indices;
u8 subcaches[4];
@@ -55,6 +59,7 @@ struct threshold_bank {
};
struct amd_northbridge {
+ struct pci_dev *root;
struct pci_dev *misc;
struct pci_dev *link;
struct amd_l3_cache l3_cache;
@@ -66,7 +71,6 @@ struct amd_northbridge_info {
u64 flags;
struct amd_northbridge *nb;
};
-extern struct amd_northbridge_info amd_northbridges;
#define AMD_NB_GART BIT(0)
#define AMD_NB_L3_INDEX_DISABLE BIT(1)
@@ -74,20 +78,9 @@ extern struct amd_northbridge_info amd_northbridges;
#ifdef CONFIG_AMD_NB
-static inline u16 amd_nb_num(void)
-{
- return amd_northbridges.num;
-}
-
-static inline bool amd_nb_has_feature(unsigned feature)
-{
- return ((amd_northbridges.flags & feature) == feature);
-}
-
-static inline struct amd_northbridge *node_to_amd_nb(int node)
-{
- return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
-}
+u16 amd_nb_num(void);
+bool amd_nb_has_feature(unsigned int feature);
+struct amd_northbridge *node_to_amd_nb(int node);
static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev)
{
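The SMN and DF accessors declared above take a node ID plus a register address and return the value through a pointer; a hedged usage sketch (the register offset is a placeholder, not a real SMN register):

/* Illustrative only: 0x1234 stands in for a real SMN register offset. */
static int example_smn_read(u16 node, u32 *val)
{
	return amd_smn_read(node, 0x1234, val);	/* value written to *val on success */
}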
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f5aaf6c83222..04f8b77ddbee 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -196,7 +196,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
{
- wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+ wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
}
static inline u32 native_apic_msr_read(u32 reg)
@@ -332,6 +332,7 @@ struct apic {
* on write for EOI.
*/
void (*eoi_write)(u32 reg, u32 v);
+ void (*native_eoi_write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
@@ -639,7 +640,6 @@ extern void irq_exit(void);
static inline void entering_irq(void)
{
irq_enter();
- exit_idle();
}
static inline void entering_ack_irq(void)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index a39629206864..53e27a12ceb6 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -104,8 +104,8 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
@@ -189,10 +189,14 @@
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
@@ -221,11 +225,13 @@
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
@@ -279,8 +285,10 @@
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */
/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 389d700b961e..e99675b9c861 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -210,12 +210,18 @@ static inline bool efi_is_64bit(void)
return __efi_early()->is64;
}
+#define efi_table_attr(table, attr, instance) \
+ (efi_is_64bit() ? \
+ ((table##_64_t *)(unsigned long)instance)->attr : \
+ ((table##_32_t *)(unsigned long)instance)->attr)
+
+#define efi_call_proto(protocol, f, instance, ...) \
+ __efi_early()->call(efi_table_attr(protocol, f, instance), \
+ instance, ##__VA_ARGS__)
+
#define efi_call_early(f, ...) \
- __efi_early()->call(efi_is_64bit() ? \
- ((efi_boot_services_64_t *)(unsigned long) \
- __efi_early()->boot_services)->f : \
- ((efi_boot_services_32_t *)(unsigned long) \
- __efi_early()->boot_services)->f, __VA_ARGS__)
+ __efi_early()->call(efi_table_attr(efi_boot_services, f, \
+ __efi_early()->boot_services), __VA_ARGS__)
#define __efi_call_early(f, ...) \
__efi_early()->call((unsigned long)f, __VA_ARGS__);
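efi_table_attr() selects the 32- or 64-bit layout of an EFI table or protocol at run time, and efi_call_proto() thunks into the selected member with the protocol instance as the first argument; efi_call_early() above is the same pattern applied to the boot services table. A hedged sketch with a hypothetical protocol (the names my_proto_32_t/my_proto_64_t and get_info are illustrative):

/* Hypothetical protocol with matching 32- and 64-bit layouts, following the
 * table##_32_t / table##_64_t naming convention the macros expect. */
typedef struct { u32 get_info; } my_proto_32_t;
typedef struct { u64 get_info; } my_proto_64_t;

/*
 * efi_table_attr(my_proto, get_info, p) then evaluates to the get_info
 * member of whichever layout matches efi_is_64bit(), and
 *
 *	status = efi_call_proto(my_proto, get_info, p, &buf);
 *
 * invokes that member through __efi_early()->call(), passing p itself as
 * the first argument.
 */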
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index 1429a7c736db..0877ae018fc9 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -27,16 +27,6 @@ extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
/*
- * Some instructions like VIA's padlock instructions generate a spurious
- * DNA fault but don't modify SSE registers. And these instructions
- * get used from interrupt context as well. To prevent these kernel instructions
- * in interrupt context interacting wrongly with other user/kernel fpu usage, we
- * should use them only in the context of irq_ts_save/restore()
- */
-extern int irq_ts_save(void);
-extern void irq_ts_restore(int TS_state);
-
-/*
* Query the presence of one or more xfeatures. Works on any legacy CPU as well.
*
* If 'feature_name' is set then put a human-readable description of
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 2737366ea583..d4a684997497 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -60,11 +60,6 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
/*
* FPU related CPU feature flag helper routines:
*/
-static __always_inline __pure bool use_eager_fpu(void)
-{
- return static_cpu_has(X86_FEATURE_EAGER_FPU);
-}
-
static __always_inline __pure bool use_xsaveopt(void)
{
return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -484,42 +479,42 @@ extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size)
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
/*
- * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx,
- * on this CPU.
+ * The in-register FPU state for an FPU context on a CPU is assumed to be
+ * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
+ * matches the FPU.
*
- * This will disable any lazy FPU state restore of the current FPU state,
- * but if the current thread owns the FPU, it will still be saved by.
+ * If the FPU register state is valid, the kernel can skip restoring the
+ * FPU state from memory.
+ *
+ * Any code that clobbers the FPU registers or updates the in-memory
+ * FPU state for a task MUST let the rest of the kernel know that the
+ * FPU registers are no longer valid for this task.
+ *
+ * Either one of these invalidation functions is enough. Invalidate
+ * a resource you control: CPU if using the CPU for something else
+ * (with preemption disabled), FPU for the current task, or a task that
+ * is prevented from running by the current task.
*/
-static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+static inline void __cpu_invalidate_fpregs_state(void)
{
- per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
+ __this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
-static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
-{
- return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
-}
-
-
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
+static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
- if (!use_eager_fpu())
- clts();
+ fpu->last_cpu = -1;
}
-static inline void __fpregs_deactivate_hw(void)
+static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
- if (!use_eager_fpu())
- stts();
+ return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
-static inline void __fpregs_deactivate(struct fpu *fpu)
+/*
+ * These generally need preemption protection to work,
+ * do try to avoid using these on their own:
+ */
+static inline void fpregs_deactivate(struct fpu *fpu)
{
WARN_ON_FPU(!fpu->fpregs_active);
@@ -528,8 +523,7 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
trace_x86_fpu_regs_deactivated(fpu);
}
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
-static inline void __fpregs_activate(struct fpu *fpu)
+static inline void fpregs_activate(struct fpu *fpu)
{
WARN_ON_FPU(fpu->fpregs_active);
@@ -554,51 +548,19 @@ static inline int fpregs_active(void)
}
/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
- * These generally need preemption protection to work,
- * do try to avoid using these on their own.
- */
-static inline void fpregs_activate(struct fpu *fpu)
-{
- __fpregs_activate_hw();
- __fpregs_activate(fpu);
-}
-
-static inline void fpregs_deactivate(struct fpu *fpu)
-{
- __fpregs_deactivate(fpu);
- __fpregs_deactivate_hw();
-}
-
-/*
* FPU state switching for scheduling.
*
* This is a two-stage process:
*
- * - switch_fpu_prepare() saves the old state and
- * sets the new state of the CR0.TS bit. This is
- * done within the context of the old process.
+ * - switch_fpu_prepare() saves the old state.
+ * This is done within the context of the old process.
*
* - switch_fpu_finish() restores the new state as
* necessary.
*/
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t
-switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+static inline void
+switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- fpu_switch_t fpu;
-
- /*
- * If the task has used the math, pre-load the FPU on xsave processors
- * or if the past 5 consecutive context-switches used math.
- */
- fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active &&
- (use_eager_fpu() || new_fpu->counter > 5);
-
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
@@ -608,29 +570,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
/* But leave fpu_fpregs_owner_ctx! */
old_fpu->fpregs_active = 0;
trace_x86_fpu_regs_deactivated(old_fpu);
-
- /* Don't change CR0.TS if we just switch! */
- if (fpu.preload) {
- new_fpu->counter++;
- __fpregs_activate(new_fpu);
- trace_x86_fpu_regs_activated(new_fpu);
- prefetch(&new_fpu->state);
- } else {
- __fpregs_deactivate_hw();
- }
- } else {
- old_fpu->counter = 0;
+ } else
old_fpu->last_cpu = -1;
- if (fpu.preload) {
- new_fpu->counter++;
- if (fpu_want_lazy_restore(new_fpu, cpu))
- fpu.preload = 0;
- else
- prefetch(&new_fpu->state);
- fpregs_activate(new_fpu);
- }
- }
- return fpu;
}
/*
@@ -638,15 +579,19 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
*/
/*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
+ * Set up the userspace FPU context for the new task, if the task
+ * has used the FPU.
*/
-static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
- if (fpu_switch.preload)
- copy_kernel_to_fpregs(&new_fpu->state);
+ bool preload = static_cpu_has(X86_FEATURE_FPU) &&
+ new_fpu->fpstate_active;
+
+ if (preload) {
+ if (!fpregs_state_valid(new_fpu, cpu))
+ copy_kernel_to_fpregs(&new_fpu->state);
+ fpregs_activate(new_fpu);
+ }
}
/*
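After this rewrite the context-switch FPU handling boils down to two calls: switch_fpu_prepare() saves the outgoing task's registers, and switch_fpu_finish() restores the incoming task's state from memory only when fpregs_state_valid() reports that the registers no longer belong to it. A hedged sketch of the call sequence (the surrounding function is illustrative, not the kernel's __switch_to()):

/* Illustrative context-switch fragment showing the two-stage FPU switch. */
static void example_switch(struct fpu *prev_fpu, struct fpu *next_fpu, int cpu)
{
	switch_fpu_prepare(prev_fpu, cpu);	/* save the outgoing state */

	/* ... switch stacks, segments, thread state, etc. ... */

	switch_fpu_finish(next_fpu, cpu);	/* restore only if not already valid */
}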
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 48df486b02f9..3c80f5b9c09d 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -322,17 +322,6 @@ struct fpu {
unsigned char fpregs_active;
/*
- * @counter:
- *
- * This counter contains the number of consecutive context switches
- * during which the FPU stays used. If this is over a threshold, the
- * lazy FPU restore logic becomes eager, to save the trap overhead.
- * This is an unsigned char so that after 256 iterations the counter
- * wraps and the context switch behavior turns lazy again; this is to
- * deal with bursty apps that only use the FPU for a short time:
- */
- unsigned char counter;
- /*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
@@ -340,29 +329,6 @@ struct fpu {
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
- *
- * After context switches there may be a (short) time period
- * during which the in-FPU hardware registers are unchanged
- * and still perfectly match this state, if the tasks
- * scheduled afterwards are not using the FPU.
- *
- * This is the 'lazy restore' window of optimization, which
- * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
- *
- * We detect whether a subsequent task uses the FPU via setting
- * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
- *
- * During this window, if the task gets scheduled again, we
- * might be able to skip having to do a restore from this
- * memory buffer to the hardware registers - at the cost of
- * incurring the overhead of #NM fault traps.
- *
- * Note that on modern CPUs that support the XSAVEOPT (or other
- * optimized XSAVE instructions), we don't use #NM traps anymore,
- * as the hardware can track whether FPU registers need saving
- * or not. On such CPUs we activate the non-lazy ('eagerfpu')
- * logic, which unconditionally saves/restores all FPU state
- * across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 430bacf73074..1b2799e0699a 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -21,21 +21,16 @@
/* Supervisor features */
#define XFEATURE_MASK_SUPERVISOR (XFEATURE_MASK_PT)
-/* Supported features which support lazy state saving */
-#define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \
+/* All currently supported features */
+#define XCNTXT_MASK (XFEATURE_MASK_FP | \
XFEATURE_MASK_SSE | \
XFEATURE_MASK_YMM | \
XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | \
- XFEATURE_MASK_Hi16_ZMM)
-
-/* Supported features which require eager state saving */
-#define XFEATURE_MASK_EAGER (XFEATURE_MASK_BNDREGS | \
- XFEATURE_MASK_BNDCSR | \
- XFEATURE_MASK_PKRU)
-
-/* All currently supported features */
-#define XCNTXT_MASK (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
+ XFEATURE_MASK_Hi16_ZMM | \
+ XFEATURE_MASK_PKRU | \
+ XFEATURE_MASK_BNDREGS | \
+ XFEATURE_MASK_BNDCSR)
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index c5d1785373ed..dcebb1c634f1 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -1,22 +1,6 @@
#ifndef _ASM_X86_IDLE_H
#define _ASM_X86_IDLE_H
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
-#ifdef CONFIG_X86_64
-void enter_idle(void);
-void exit_idle(void);
-#else /* !CONFIG_X86_64 */
-static inline void enter_idle(void) { }
-static inline void exit_idle(void) { }
-static inline void __exit_idle(void) { }
-#endif /* CONFIG_X86_64 */
-
void amd_e400_remove_cpu(int cpu);
#endif /* _ASM_X86_IDLE_H */
diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
new file mode 100644
index 000000000000..95ce5c85b009
--- /dev/null
+++ b/arch/x86/include/asm/intel_rdt.h
@@ -0,0 +1,224 @@
+#ifndef _ASM_X86_INTEL_RDT_H
+#define _ASM_X86_INTEL_RDT_H
+
+#ifdef CONFIG_INTEL_RDT_A
+
+#include <linux/kernfs.h>
+#include <linux/jump_label.h>
+
+#include <asm/intel_rdt_common.h>
+
+#define IA32_L3_QOS_CFG 0xc81
+#define IA32_L3_CBM_BASE 0xc90
+#define IA32_L2_CBM_BASE 0xd10
+
+#define L3_QOS_CDP_ENABLE 0x01ULL
+
+/**
+ * struct rdtgroup - store rdtgroup's data in the resctrl file system.
+ * @kn: kernfs node
+ * @rdtgroup_list: linked list for all rdtgroups
+ * @closid: closid for this rdtgroup
+ * @cpu_mask: CPUs assigned to this rdtgroup
+ * @flags: status bits
+ * @waitcount: how many cpus expect to find this
+ * group when they acquire rdtgroup_mutex
+ */
+struct rdtgroup {
+ struct kernfs_node *kn;
+ struct list_head rdtgroup_list;
+ int closid;
+ struct cpumask cpu_mask;
+ int flags;
+ atomic_t waitcount;
+};
+
+/* rdtgroup.flags */
+#define RDT_DELETED 1
+
+/* List of all resource groups */
+extern struct list_head rdt_all_groups;
+
+int __init rdtgroup_init(void);
+
+/**
+ * struct rftype - describe each file in the resctrl file system
+ * @name: file name
+ * @mode: access mode
+ * @kf_ops: operations
+ * @seq_show: show content of the file
+ * @write: write to the file
+ */
+struct rftype {
+ char *name;
+ umode_t mode;
+ struct kernfs_ops *kf_ops;
+
+ int (*seq_show)(struct kernfs_open_file *of,
+ struct seq_file *sf, void *v);
+ /*
+ * write() is the generic write callback which maps directly to
+ * kernfs write operation and overrides all other operations.
+ * Maximum write size is determined by ->max_write_len.
+ */
+ ssize_t (*write)(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+};
+
+/**
+ * struct rdt_resource - attributes of an RDT resource
+ * @enabled: Is this feature enabled on this machine
+ * @capable: Is this feature available on this machine
+ * @name: Name to use in "schemata" file
+ * @num_closid: Number of CLOSIDs available
+ * @max_cbm: Largest Cache Bit Mask allowed
+ * @min_cbm_bits: Minimum number of consecutive bits to be set
+ * in a cache bit mask
+ * @domains: All domains for this resource
+ * @num_domains: Number of domains active
+ * @msr_base: Base MSR address for CBMs
+ * @tmp_cbms: Scratch space when updating schemata
+ * @num_tmp_cbms: Number of CBMs in tmp_cbms
+ * @cache_level: Which cache level defines scope of this domain
+ * @cbm_idx_multi: Multiplier of CBM index
+ * @cbm_idx_offset: Offset of CBM index. CBM index is computed by:
+ * closid * cbm_idx_multi + cbm_idx_offset
+ */
+struct rdt_resource {
+ bool enabled;
+ bool capable;
+ char *name;
+ int num_closid;
+ int cbm_len;
+ int min_cbm_bits;
+ u32 max_cbm;
+ struct list_head domains;
+ int num_domains;
+ int msr_base;
+ u32 *tmp_cbms;
+ int num_tmp_cbms;
+ int cache_level;
+ int cbm_idx_multi;
+ int cbm_idx_offset;
+};
+
+/**
+ * struct rdt_domain - group of cpus sharing an RDT resource
+ * @list: all instances of this resource
+ * @id: unique id for this instance
+ * @cpu_mask: which cpus share this resource
+ * @cbm: array of cache bit masks (indexed by CLOSID)
+ */
+struct rdt_domain {
+ struct list_head list;
+ int id;
+ struct cpumask cpu_mask;
+ u32 *cbm;
+};
+
+/**
+ * struct msr_param - set a range of MSRs from a domain
+ * @res: The resource to use
+ * @low: Beginning index from base MSR
+ * @high: End index
+ */
+struct msr_param {
+ struct rdt_resource *res;
+ int low;
+ int high;
+};
+
+extern struct mutex rdtgroup_mutex;
+
+extern struct rdt_resource rdt_resources_all[];
+extern struct rdtgroup rdtgroup_default;
+DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
+
+int __init rdtgroup_init(void);
+
+enum {
+ RDT_RESOURCE_L3,
+ RDT_RESOURCE_L3DATA,
+ RDT_RESOURCE_L3CODE,
+ RDT_RESOURCE_L2,
+
+ /* Must be the last */
+ RDT_NUM_RESOURCES,
+};
+
+#define for_each_capable_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->capable)
+
+#define for_each_enabled_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++) \
+ if (r->enabled)
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EAX */
+union cpuid_0x10_1_eax {
+ struct {
+ unsigned int cbm_len:5;
+ } split;
+ unsigned int full;
+};
+
+/* CPUID.(EAX=10H, ECX=ResID=1).EDX */
+union cpuid_0x10_1_edx {
+ struct {
+ unsigned int cos_max:16;
+ } split;
+ unsigned int full;
+};
+
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
+
+void rdt_cbm_update(void *arg);
+struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
+void rdtgroup_kn_unlock(struct kernfs_node *kn);
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off);
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v);
+
+/*
+ * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
+ *
+ * The following considerations are made so that this has minimal impact
+ * on the scheduler hot path:
+ * - This will stay a no-op unless we are running on an Intel SKU
+ * which supports resource control and the resctrl file system has
+ * been mounted.
+ * - The per-CPU CLOSid values are cached and the MSR write is done only
+ * when a task with a different CLOSid is scheduled in.
+ *
+ * Must be called with preemption disabled.
+ */
+static inline void intel_rdt_sched_in(void)
+{
+ if (static_branch_likely(&rdt_enable_key)) {
+ struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+ int closid;
+
+ /*
+ * If this task has a closid assigned, use it.
+ * Else use the closid assigned to this cpu.
+ */
+ closid = current->closid;
+ if (closid == 0)
+ closid = this_cpu_read(cpu_closid);
+
+ if (closid != state->closid) {
+ state->closid = closid;
+ wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
+ }
+ }
+}
+
+#else
+
+static inline void intel_rdt_sched_in(void) {}
+
+#endif /* CONFIG_INTEL_RDT_A */
+#endif /* _ASM_X86_INTEL_RDT_H */
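As the comment on intel_rdt_sched_in() says, it is meant to sit on the context-switch path with preemption already disabled; a hedged sketch of a call site (the wrapper function is illustrative, not part of this patch):

/* Illustrative: hooking RDT into the end of a context switch. */
static void example_finish_context_switch(void)
{
	/* Preemption is already disabled on the context-switch path. */
	intel_rdt_sched_in();	/* no-op unless resctrl is mounted */
}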
diff --git a/arch/x86/include/asm/intel_rdt_common.h b/arch/x86/include/asm/intel_rdt_common.h
new file mode 100644
index 000000000000..b31081b89407
--- /dev/null
+++ b/arch/x86/include/asm/intel_rdt_common.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_X86_INTEL_RDT_COMMON_H
+#define _ASM_X86_INTEL_RDT_COMMON_H
+
+#define MSR_IA32_PQR_ASSOC 0x0c8f
+
+/**
+ * struct intel_pqr_state - State cache for the PQR MSR
+ * @rmid: The cached Resource Monitoring ID
+ * @closid: The cached Class Of Service ID
+ * @rmid_usecnt: The usage counter for rmid
+ *
+ * The upper 32 bits of MSR_IA32_PQR_ASSOC contain closid and the
+ * lower 10 bits rmid. The update to MSR_IA32_PQR_ASSOC always
+ * contains both parts, so we need to cache them.
+ *
+ * The cache also helps to avoid pointless updates if the value does
+ * not change.
+ */
+struct intel_pqr_state {
+ u32 rmid;
+ u32 closid;
+ int rmid_usecnt;
+};
+
+DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+
+#endif /* _ASM_X86_INTEL_RDT_COMMON_H */
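Per the comment above, the RMID lives in the low half of MSR_IA32_PQR_ASSOC and the CLOSID in the upper 32 bits, and the MSR is always written with both; a small illustrative helper showing how the cached state avoids redundant writes (not part of this patch):

/* Illustrative: update the PQR MSR from the cached per-CPU state. */
static void example_pqr_update(struct intel_pqr_state *state,
			       u32 rmid, u32 closid)
{
	if (state->rmid == rmid && state->closid == closid)
		return;			/* nothing changed, skip the MSR write */

	state->rmid = rmid;
	state->closid = closid;
	wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid);	/* low = rmid, high = closid */
}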
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index d31881188431..29a594a3b82a 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -21,7 +21,6 @@ enum die_val {
DIE_NMIUNKNOWN,
};
-extern void printk_address(unsigned long address);
extern void die(const char *, struct pt_regs *,long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_stack_regs(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index ef01fef3eebc..6c119cfae218 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -9,7 +9,6 @@
#define LHCALL_FLUSH_TLB 5
#define LHCALL_LOAD_IDT_ENTRY 6
#define LHCALL_SET_STACK 7
-#define LHCALL_TS 8
#define LHCALL_SET_CLOCKEVENT 9
#define LHCALL_HALT 10
#define LHCALL_SET_PMD 13
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 9bd7ff5ffbcc..29e873686767 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -252,8 +252,10 @@ static inline void cmci_recheck(void) {}
#ifdef CONFIG_X86_MCE_AMD
void mce_amd_feature_init(struct cpuinfo_x86 *c);
+int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr);
#else
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
+static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr) { return -EINVAL; };
#endif
int mce_available(struct cpuinfo_x86 *c);
@@ -293,9 +295,7 @@ void do_machine_check(struct pt_regs *, long);
/*
* Threshold handler
*/
-
extern void (*mce_threshold_vector)(void);
-extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
/* Deferred error interrupt handler */
extern void (*deferred_error_int_vector)(void);
@@ -356,28 +356,29 @@ enum smca_bank_types {
N_SMCA_BANK_TYPES
};
-struct smca_bank_name {
- const char *name; /* Short name for sysfs */
- const char *long_name; /* Long name for pretty-printing */
-};
-
-extern struct smca_bank_name smca_bank_names[N_SMCA_BANK_TYPES];
+#define HWID_MCATYPE(hwid, mcatype) (((hwid) << 16) | (mcatype))
+extern int mce_threshold_create_device(unsigned int cpu);
+extern int mce_threshold_remove_device(unsigned int cpu);
-#define HWID_MCATYPE(hwid, mcatype) ((hwid << 16) | mcatype)
-
-struct smca_hwid_mcatype {
+struct smca_hwid {
unsigned int bank_type; /* Use with smca_bank_types for easy indexing. */
u32 hwid_mcatype; /* (hwid,mcatype) tuple */
u32 xec_bitmap; /* Bitmap of valid ExtErrorCodes; current max is 21. */
};
-struct smca_bank_info {
- struct smca_hwid_mcatype *type;
- u32 type_instance;
+struct smca_bank {
+ struct smca_hwid *hwid;
+ /* Instance ID */
+ u32 id;
};
-extern struct smca_bank_info smca_banks[MAX_NR_BANKS];
+extern struct smca_bank smca_banks[MAX_NR_BANKS];
+
+extern const char *smca_get_long_name(enum smca_bank_types t);
+#else /* !CONFIG_X86_MCE_AMD: */
+static inline int mce_threshold_create_device(unsigned int cpu) { return 0; };
+static inline int mce_threshold_remove_device(unsigned int cpu) { return 0; };
#endif
#endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index da0d81fa0b54..38711df3bcb5 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -20,6 +20,15 @@ do { \
(u32)((u64)(val)), \
(u32)((u64)(val) >> 32))
+struct ucode_patch {
+ struct list_head plist;
+ void *data; /* Intel uses only this one */
+ u32 patch_id;
+ u16 equiv_cpu;
+};
+
+extern struct list_head microcode_cache;
+
struct cpu_signature {
unsigned int sig;
unsigned int pf;
@@ -55,12 +64,7 @@ struct ucode_cpu_info {
void *mc;
};
extern struct ucode_cpu_info ucode_cpu_info[];
-
-#ifdef CONFIG_MICROCODE
-int __init microcode_init(void);
-#else
-static inline int __init microcode_init(void) { return 0; };
-#endif
+struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa);
#ifdef CONFIG_MICROCODE_INTEL
extern struct microcode_ops * __init init_intel_microcode(void);
@@ -131,11 +135,13 @@ static inline unsigned int x86_cpuid_family(void)
}
#ifdef CONFIG_MICROCODE
+int __init microcode_init(void);
extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
#else
+static inline int __init microcode_init(void) { return 0; };
static inline void __init load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void reload_early_microcode(void) { }
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 15eb75484cc0..3e3e20be829a 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -40,38 +40,18 @@ struct microcode_amd {
unsigned int mpb[0];
};
-static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
- unsigned int sig)
-{
- int i = 0;
-
- if (!equiv_cpu_table)
- return 0;
-
- while (equiv_cpu_table[i].installed_cpu != 0) {
- if (sig == equiv_cpu_table[i].installed_cpu)
- return equiv_cpu_table[i].equiv_cpu;
-
- i++;
- }
- return 0;
-}
-
-extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
-extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
-
#define PATCH_MAX_SIZE PAGE_SIZE
#ifdef CONFIG_MICROCODE_AMD
extern void __init load_ucode_amd_bsp(unsigned int family);
-extern void load_ucode_amd_ap(void);
-extern int __init save_microcode_in_initrd_amd(void);
+extern void load_ucode_amd_ap(unsigned int family);
+extern int __init save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(void);
#else
static inline void __init load_ucode_amd_bsp(unsigned int family) {}
-static inline void load_ucode_amd_ap(void) {}
-static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
+static inline void load_ucode_amd_ap(unsigned int family) {}
+static inline int __init
+save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
void reload_ucode_amd(void) {}
#endif
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 5e69154c9f07..195becc6f780 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -52,10 +52,6 @@ struct extended_sigtable {
#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
-extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev);
-extern int microcode_sanity_check(void *mc, int print_err);
-extern int find_matching_signature(void *mc, unsigned int csig, int cpf);
-
#ifdef CONFIG_MICROCODE_INTEL
extern void __init load_ucode_intel_bsp(void);
extern void load_ucode_intel_ap(void);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 78f3760ca1f2..710273c617b8 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -37,6 +37,10 @@
#define EFER_FFXSR (1<<_EFER_FFXSR)
/* Intel MSRs. Some also available on other CPUs */
+
+#define MSR_PPIN_CTL 0x0000004e
+#define MSR_PPIN 0x0000004f
+
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index b5fee97813cd..db0b90c3b03e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -70,14 +70,14 @@ extern struct tracepoint __tracepoint_read_msr;
extern struct tracepoint __tracepoint_write_msr;
extern struct tracepoint __tracepoint_rdpmc;
#define msr_tracepoint_active(t) static_key_false(&(t).key)
-extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
-extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
+extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
+extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
#define msr_tracepoint_active(t) false
-static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
-static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif
static inline unsigned long long native_read_msr(unsigned int msr)
@@ -115,22 +115,36 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
}
/* Can be uninlined because referenced by paravirt */
-notrace static inline void native_write_msr(unsigned int msr,
- unsigned low, unsigned high)
+static inline void notrace
+__native_write_msr_notrace(unsigned int msr, u32 low, u32 high)
{
asm volatile("1: wrmsr\n"
"2:\n"
_ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe)
: : "c" (msr), "a"(low), "d" (high) : "memory");
+}
+
+/* Can be uninlined because referenced by paravirt */
+static inline void notrace
+native_write_msr(unsigned int msr, u32 low, u32 high)
+{
+ __native_write_msr_notrace(msr, low, high);
if (msr_tracepoint_active(__tracepoint_write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}
+static inline void
+wrmsr_notrace(unsigned int msr, u32 low, u32 high)
+{
+ __native_write_msr_notrace(msr, low, high);
+}
+
/* Can be uninlined because referenced by paravirt */
-notrace static inline int native_write_msr_safe(unsigned int msr,
- unsigned low, unsigned high)
+static inline int notrace
+native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
int err;
+
asm volatile("2: wrmsr ; xor %[err],%[err]\n"
"1:\n\t"
".section .fixup,\"ax\"\n\t"
@@ -223,7 +237,7 @@ do { \
(void)((high) = (u32)(__val >> 32)); \
} while (0)
-static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
+static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
native_write_msr(msr, low, high);
}
@@ -231,13 +245,13 @@ static inline void wrmsr(unsigned msr, unsigned low, unsigned high)
#define rdmsrl(msr, val) \
((val) = native_read_msr((msr)))
-static inline void wrmsrl(unsigned msr, u64 val)
+static inline void wrmsrl(unsigned int msr, u64 val)
{
native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}
/* wrmsr with exception handling */
-static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
+static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
return native_write_msr_safe(msr, low, high);
}
@@ -252,7 +266,7 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
__err; \
})
-static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
+static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
int err;
@@ -325,12 +339,12 @@ static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
struct msr *msrs)
{
- rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+ rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
struct msr *msrs)
{
- wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+ wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
u32 *l, u32 *h)
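wrmsr_notrace() performs the same register write as wrmsr() but skips the write_msr tracepoint, which matters for MSR writes that can happen inside the tracing machinery itself; the apic.h hunk above switches the APIC EOI write over for exactly that reason. A sketch mirroring that use:

/* Illustrative: acknowledge an x2APIC interrupt without emitting a
 * write_msr tracepoint, so tracing code can safely EOI its own IRQs. */
static void example_eoi(void)
{
	wrmsr_notrace(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
}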
diff --git a/arch/x86/include/asm/mutex.h b/arch/x86/include/asm/mutex.h
deleted file mode 100644
index 7d3a48275394..000000000000
--- a/arch/x86/include/asm/mutex.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#ifdef CONFIG_X86_32
-# include <asm/mutex_32.h>
-#else
-# include <asm/mutex_64.h>
-#endif
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
deleted file mode 100644
index e9355a84fc67..000000000000
--- a/arch/x86/include/asm/mutex_32.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Assembly implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- *
- * started by Ingo Molnar:
- *
- * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- */
-#ifndef _ASM_X86_MUTEX_32_H
-#define _ASM_X86_MUTEX_32_H
-
-#include <asm/alternative.h>
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fn> if it
- * wasn't 1 originally. This function MUST leave the value lower than 1
- * even when the "1" assertion wasn't true.
- */
-#define __mutex_fastpath_lock(count, fail_fn) \
-do { \
- unsigned int dummy; \
- \
- typecheck(atomic_t *, count); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
- " jns 1f \n" \
- " call " #fail_fn "\n" \
- "1:\n" \
- : "=a" (dummy) \
- : "a" (count) \
- : "memory", "ecx", "edx"); \
-} while (0)
-
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(atomic_dec_return(count) < 0))
- return -1;
- else
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value
- * to 1, or to set it to a value lower than 1.
- *
- * If the implementation sets it to a value of lower than 1, the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-#define __mutex_fastpath_unlock(count, fail_fn) \
-do { \
- unsigned int dummy; \
- \
- typecheck(atomic_t *, count); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
- " jg 1f\n" \
- " call " #fail_fn "\n" \
- "1:\n" \
- : "=a" (dummy) \
- : "a" (count) \
- : "memory", "ecx", "edx"); \
-} while (0)
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- */
-static inline int __mutex_fastpath_trylock(atomic_t *count,
- int (*fail_fn)(atomic_t *))
-{
- /* cmpxchg because it never induces a false contention state. */
- if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
- return 1;
-
- return 0;
-}
-
-#endif /* _ASM_X86_MUTEX_32_H */
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
deleted file mode 100644
index d9850758464e..000000000000
--- a/arch/x86/include/asm/mutex_64.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Assembly implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- *
- * started by Ingo Molnar:
- *
- * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- */
-#ifndef _ASM_X86_MUTEX_64_H
-#define _ASM_X86_MUTEX_64_H
-
-/**
- * __mutex_fastpath_lock - decrement and call function if negative
- * @v: pointer of type atomic_t
- * @fail_fn: function to call if the result is negative
- *
- * Atomically decrements @v and calls <fail_fn> if the result is negative.
- */
-#ifdef CC_HAVE_ASM_GOTO
-static inline void __mutex_fastpath_lock(atomic_t *v,
- void (*fail_fn)(atomic_t *))
-{
- asm_volatile_goto(LOCK_PREFIX " decl %0\n"
- " jns %l[exit]\n"
- : : "m" (v->counter)
- : "memory", "cc"
- : exit);
- fail_fn(v);
-exit:
- return;
-}
-#else
-#define __mutex_fastpath_lock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- asm volatile(LOCK_PREFIX " decl (%%rdi)\n" \
- " jns 1f \n" \
- " call " #fail_fn "\n" \
- "1:" \
- : "=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
-} while (0)
-#endif
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(atomic_dec_return(count) < 0))
- return -1;
- else
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - increment and call function if nonpositive
- * @v: pointer of type atomic_t
- * @fail_fn: function to call if the result is nonpositive
- *
- * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
- */
-#ifdef CC_HAVE_ASM_GOTO
-static inline void __mutex_fastpath_unlock(atomic_t *v,
- void (*fail_fn)(atomic_t *))
-{
- asm_volatile_goto(LOCK_PREFIX " incl %0\n"
- " jg %l[exit]\n"
- : : "m" (v->counter)
- : "memory", "cc"
- : exit);
- fail_fn(v);
-exit:
- return;
-}
-#else
-#define __mutex_fastpath_unlock(v, fail_fn) \
-do { \
- unsigned long dummy; \
- \
- typecheck(atomic_t *, v); \
- typecheck_fn(void (*)(atomic_t *), fail_fn); \
- \
- asm volatile(LOCK_PREFIX " incl (%%rdi)\n" \
- " jg 1f\n" \
- " call " #fail_fn "\n" \
- "1:" \
- : "=D" (dummy) \
- : "D" (v) \
- : "rax", "rsi", "rdx", "rcx", \
- "r8", "r9", "r10", "r11", "memory"); \
-} while (0)
-#endif
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
- * if it wasn't 1 originally. [the fallback function is never used on
- * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
- */
-static inline int __mutex_fastpath_trylock(atomic_t *count,
- int (*fail_fn)(atomic_t *))
-{
- if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
- return 1;
-
- return 0;
-}
-
-#endif /* _ASM_X86_MUTEX_64_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ce932812f142..1eea6ca40694 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -41,11 +41,6 @@ static inline void set_debugreg(unsigned long val, int reg)
PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
-static inline void clts(void)
-{
- PVOP_VCALL0(pv_cpu_ops.clts);
-}
-
static inline unsigned long read_cr0(void)
{
return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
@@ -678,6 +673,11 @@ static __always_inline void pv_kick(int cpu)
PVOP_VCALL1(pv_lock_ops.kick, cpu);
}
+static __always_inline bool pv_vcpu_is_preempted(int cpu)
+{
+ return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+}
+
#endif /* SMP && PARAVIRT_SPINLOCKS */
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0f400c0e4979..ec111ef60b15 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -103,8 +103,6 @@ struct pv_cpu_ops {
unsigned long (*get_debugreg)(int regno);
void (*set_debugreg)(int regno, unsigned long value);
- void (*clts)(void);
-
unsigned long (*read_cr0)(void);
void (*write_cr0)(unsigned long);
@@ -310,6 +308,8 @@ struct pv_lock_ops {
void (*wait)(u8 *ptr, u8 val);
void (*kick)(int cpu);
+
+ struct paravirt_callee_save vcpu_is_preempted;
};
/* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 84f58de08c2b..9fa03604b2b3 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -507,17 +507,6 @@ do { \
#endif
-/* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define x86_test_and_clear_bit_percpu(bit, var) \
-({ \
- bool old__; \
- asm volatile("btr %2,"__percpu_arg(1)"\n\t" \
- CC_SET(c) \
- : CC_OUT(c) (old__), "+m" (var) \
- : "dIr" (bit)); \
- old__; \
-})
-
static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
const unsigned long __percpu *addr)
{
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 17f218645701..ec1f3c651150 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -24,7 +24,13 @@ static __always_inline int preempt_count(void)
static __always_inline void preempt_count_set(int pc)
{
- raw_cpu_write_4(__preempt_count, pc);
+ int old, new;
+
+ do {
+ old = raw_cpu_read_4(__preempt_count);
+ new = (old & PREEMPT_NEED_RESCHED) |
+ (pc & ~PREEMPT_NEED_RESCHED);
+ } while (raw_cpu_cmpxchg_4(__preempt_count, old, new) != old);
}
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 984a7bf17f6a..1f6a92903b09 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -137,6 +137,17 @@ struct cpuinfo_x86 {
u32 microcode;
};
+struct cpuid_regs {
+ u32 eax, ebx, ecx, edx;
+};
+
+enum cpuid_regs_idx {
+ CPUID_EAX = 0,
+ CPUID_EBX,
+ CPUID_ECX,
+ CPUID_EDX,
+};
+
#define X86_VENDOR_INTEL 0
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
@@ -178,6 +189,9 @@ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
void print_cpu_msr(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
+extern u32 get_scattered_cpuid_leaf(unsigned int level,
+ unsigned int sub_leaf,
+ enum cpuid_regs_idx reg);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
@@ -588,8 +602,6 @@ static __always_inline void cpu_relax(void)
rep_nop();
}
-#define cpu_relax_lowlatency() cpu_relax()
-
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index eaba08076030..c343ab52579f 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -32,6 +32,12 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
{
pv_queued_spin_unlock(lock);
}
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return pv_vcpu_is_preempted(cpu);
+}
#else
static inline void queued_spin_unlock(struct qspinlock *lock)
{
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 19a2224f9e16..12af3e35edfa 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -6,11 +6,6 @@
#include <asm/nops.h>
-static inline void native_clts(void)
-{
- asm volatile("clts");
-}
-
/*
* Volatile isn't enough to prevent the compiler from reordering the
* read/write functions for the control registers and messing everything up.
@@ -208,16 +203,8 @@ static inline void load_gs_index(unsigned selector)
#endif
-/* Clear the 'TS' bit */
-static inline void clts(void)
-{
- native_clts();
-}
-
#endif/* CONFIG_PARAVIRT */
-#define stts() write_cr0(read_cr0() | X86_CR0_TS)
-
static inline void clflush(volatile void *__p)
{
asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 37f2e0b377ad..a3269c897ec5 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -30,8 +30,7 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
int get_stack_info(unsigned long *stack, struct task_struct *task,
struct stack_info *info, unsigned long *visit_mask);
-void stack_type_str(enum stack_type type, const char **begin,
- const char **end);
+const char *stack_type_name(enum stack_type type);
static inline bool on_stack(struct stack_info *info, void *addr, size_t len)
{
@@ -43,8 +42,6 @@ static inline bool on_stack(struct stack_info *info, void *addr, size_t len)
addr + len > begin && addr + len <= end);
}
-extern int kstack_depth_to_print;
-
#ifdef CONFIG_X86_32
#define STACKSLOTS_PER_LINE 8
#else
@@ -86,9 +83,6 @@ get_stack_pointer(struct task_struct *task, struct pt_regs *regs)
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, char *log_lvl);
-void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *sp, char *log_lvl);
-
extern unsigned int code_bytes;
/* The form of the top of the frame on the stack */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index cf75871d2f81..6358a85e2270 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -146,4 +146,36 @@ struct pci_bus;
int x86_pci_root_bus_node(int bus);
void x86_pci_root_bus_resources(int bus, struct list_head *resources);
+extern bool x86_topology_update;
+
+#ifdef CONFIG_SCHED_MC_PRIO
+#include <asm/percpu.h>
+
+DECLARE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
+extern unsigned int __read_mostly sysctl_sched_itmt_enabled;
+
+/* Interface to set priority of a cpu */
+void sched_set_itmt_core_prio(int prio, int core_cpu);
+
+/* Interface to notify scheduler that system supports ITMT */
+int sched_set_itmt_support(void);
+
+/* Interface to notify scheduler that system revokes ITMT support */
+void sched_clear_itmt_support(void);
+
+#else /* CONFIG_SCHED_MC_PRIO */
+
+#define sysctl_sched_itmt_enabled 0
+static inline void sched_set_itmt_core_prio(int prio, int core_cpu)
+{
+}
+static inline int sched_set_itmt_support(void)
+{
+ return 0;
+}
+static inline void sched_clear_itmt_support(void)
+{
+}
+#endif /* CONFIG_SCHED_MC_PRIO */
+
#endif /* _ASM_X86_TOPOLOGY_H */
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 9217ab1f5bf6..342e59789fcd 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
__field(struct fpu *, fpu)
__field(bool, fpregs_active)
__field(bool, fpstate_active)
- __field(int, counter)
__field(u64, xfeatures)
__field(u64, xcomp_bv)
),
@@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
__entry->fpu = fpu;
__entry->fpregs_active = fpu->fpregs_active;
__entry->fpstate_active = fpu->fpstate_active;
- __entry->counter = fpu->counter;
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
__entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
}
),
- TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+ TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
__entry->fpu,
__entry->fpregs_active,
__entry->fpstate_active,
- __entry->counter,
__entry->xfeatures,
__entry->xcomp_bv
)
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 33b6365c22fe..c054eaa7dc94 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -45,8 +45,17 @@ extern int tsc_clocksource_reliable;
* Boot-time check whether the TSCs are synchronized across
* all CPUs/cores:
*/
+#ifdef CONFIG_X86_TSC
+extern bool tsc_store_and_check_tsc_adjust(void);
+extern void tsc_verify_tsc_adjust(void);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
+#else
+static inline bool tsc_store_and_check_tsc_adjust(void) { return false; }
+static inline void tsc_verify_tsc_adjust(void) { }
+static inline void check_tsc_sync_source(int cpu) { }
+static inline void check_tsc_sync_target(void) { }
+#endif
extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index faf3687f1035..ea148313570f 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -68,6 +68,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+# define WARN_ON_IN_IRQ() WARN_ON_ONCE(!in_task())
+#else
+# define WARN_ON_IN_IRQ()
+#endif
+
/**
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
@@ -88,8 +94,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
-#define access_ok(type, addr, size) \
- likely(!__range_not_ok(addr, size, user_addr_max()))
+#define access_ok(type, addr, size) \
+({ \
+ WARN_ON_IN_IRQ(); \
+ likely(!__range_not_ok(addr, size, user_addr_max())); \
+})
/*
* These are the main single-value transfer routines. They automatically
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
index 46de9ac4b990..c5a7f3a930dd 100644
--- a/arch/x86/include/asm/unwind.h
+++ b/arch/x86/include/asm/unwind.h
@@ -13,6 +13,7 @@ struct unwind_state {
int graph_idx;
#ifdef CONFIG_FRAME_POINTER
unsigned long *bp;
+ struct pt_regs *regs;
#else
unsigned long *sp;
#endif
@@ -47,7 +48,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
if (unwind_done(state))
return NULL;
- return state->bp + 1;
+ return state->regs ? &state->regs->ip : state->bp + 1;
+}
+
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return NULL;
+
+ return state->regs;
}
#else /* !CONFIG_FRAME_POINTER */
@@ -58,6 +67,11 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
return NULL;
}
+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+{
+ return NULL;
+}
+
#endif /* CONFIG_FRAME_POINTER */
#endif /* _ASM_X86_UNWIND_H */
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index e728699db774..3a01996db58f 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -89,8 +89,13 @@ static inline unsigned int __getcpu(void)
* works on all CPUs. This is volatile so that it orders
* correctly wrt barrier() and to keep gcc from cleverly
* hoisting it out of the calling function.
+ *
+ * If RDPID is available, use it.
*/
- asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ alternative_io ("lsl %[p],%[seg]",
+ ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+ X86_FEATURE_RDPID,
+ [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
return p;
}
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index c18ce67495fa..b10bf319ed20 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -7,6 +7,7 @@
#define SETUP_DTB 2
#define SETUP_PCI 3
#define SETUP_EFI 4
+#define SETUP_APPLE_PROPERTIES 5
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 94dc8ca434e0..1421a6585126 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -45,7 +45,9 @@ struct kvm_steal_time {
__u64 steal;
__u32 version;
__u32 flags;
- __u32 pad[12];
+ __u8 preempted;
+ __u8 u8_pad[3];
+ __u32 pad[11];
};
#define KVM_STEAL_ALIGNMENT_BITS 5
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 69a6e07e3149..eb6247a7009b 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -28,6 +28,7 @@ struct mce {
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
__u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
__u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
+ __u64 ppin; /* Protected Processor Inventory Number */
};
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index ae135de547f5..835aa51c7f6e 100644
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -6,10 +6,8 @@
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
-#ifdef CONFIG_CHECKPOINT_RESTORE
-# define ARCH_MAP_VDSO_X32 0x2001
-# define ARCH_MAP_VDSO_32 0x2002
-# define ARCH_MAP_VDSO_64 0x2003
-#endif
+#define ARCH_MAP_VDSO_X32 0x2001
+#define ARCH_MAP_VDSO_32 0x2002
+#define ARCH_MAP_VDSO_64 0x2003
#endif /* _ASM_X86_PRCTL_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 79076d75bdbf..581386c7e429 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -75,7 +75,7 @@ apm-y := apm_32.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smpboot.o
-obj-$(CONFIG_SMP) += tsc_sync.o
+obj-$(CONFIG_X86_TSC) += tsc_sync.o
obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
@@ -123,6 +123,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_TRACING) += tracepoint.o
+obj-$(CONFIG_SCHED_MC_PRIO) += itmt.o
ifdef CONFIG_FRAME_POINTER
obj-y += unwind_frame.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 931ced8ca345..4764fa56924d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -76,6 +76,7 @@ int acpi_fix_pin2_polarity __initdata;
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#endif
+#ifdef CONFIG_X86_IO_APIC
/*
* Locks related to IOAPIC hotplug
* Hotplug side:
@@ -88,6 +89,7 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
* ->ioapic_lock
*/
static DEFINE_MUTEX(acpi_ioapic_lock);
+#endif
/* --------------------------------------------------------------------------
Boot-time Configuration
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 4fdf6230d93c..458da8509b75 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -13,8 +13,20 @@
#include <linux/spinlock.h>
#include <asm/amd_nb.h>
+#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
+
+/* Protect the PCI config register pairs used for SMN and DF indirect access. */
+static DEFINE_MUTEX(smn_mutex);
+
static u32 *flush_words;
+static const struct pci_device_id amd_root_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
+ {}
+};
+
const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
@@ -24,9 +36,10 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
-EXPORT_SYMBOL(amd_nb_misc_ids);
+EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
@@ -34,6 +47,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{}
};
@@ -44,8 +58,25 @@ const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
{ }
};
-struct amd_northbridge_info amd_northbridges;
-EXPORT_SYMBOL(amd_northbridges);
+static struct amd_northbridge_info amd_northbridges;
+
+u16 amd_nb_num(void)
+{
+ return amd_northbridges.num;
+}
+EXPORT_SYMBOL_GPL(amd_nb_num);
+
+bool amd_nb_has_feature(unsigned int feature)
+{
+ return ((amd_northbridges.flags & feature) == feature);
+}
+EXPORT_SYMBOL_GPL(amd_nb_has_feature);
+
+struct amd_northbridge *node_to_amd_nb(int node)
+{
+ return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
+}
+EXPORT_SYMBOL_GPL(node_to_amd_nb);
static struct pci_dev *next_northbridge(struct pci_dev *dev,
const struct pci_device_id *ids)
@@ -58,13 +89,106 @@ static struct pci_dev *next_northbridge(struct pci_dev *dev,
return dev;
}
+static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
+{
+ struct pci_dev *root;
+ int err = -ENODEV;
+
+ if (node >= amd_northbridges.num)
+ goto out;
+
+ root = node_to_amd_nb(node)->root;
+ if (!root)
+ goto out;
+
+ mutex_lock(&smn_mutex);
+
+ err = pci_write_config_dword(root, 0x60, address);
+ if (err) {
+ pr_warn("Error programming SMN address 0x%x.\n", address);
+ goto out_unlock;
+ }
+
+ err = (write ? pci_write_config_dword(root, 0x64, *value)
+ : pci_read_config_dword(root, 0x64, value));
+ if (err)
+ pr_warn("Error %s SMN address 0x%x.\n",
+ (write ? "writing to" : "reading from"), address);
+
+out_unlock:
+ mutex_unlock(&smn_mutex);
+
+out:
+ return err;
+}
+
+int amd_smn_read(u16 node, u32 address, u32 *value)
+{
+ return __amd_smn_rw(node, address, value, false);
+}
+EXPORT_SYMBOL_GPL(amd_smn_read);
+
+int amd_smn_write(u16 node, u32 address, u32 value)
+{
+ return __amd_smn_rw(node, address, &value, true);
+}
+EXPORT_SYMBOL_GPL(amd_smn_write);
+
+/*
+ * Data Fabric Indirect Access uses FICAA/FICAD.
+ *
+ * Fabric Indirect Configuration Access Address (FICAA): Constructed based
+ * on the device's Instance Id and the PCI function and register offset of
+ * the desired register.
+ *
+ * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
+ * and FICAD HI registers but so far we only need the LO register.
+ */
+int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
+{
+ struct pci_dev *F4;
+ u32 ficaa;
+ int err = -ENODEV;
+
+ if (node >= amd_northbridges.num)
+ goto out;
+
+ F4 = node_to_amd_nb(node)->link;
+ if (!F4)
+ goto out;
+
+ ficaa = 1;
+ ficaa |= reg & 0x3FC;
+ ficaa |= (func & 0x7) << 11;
+ ficaa |= instance_id << 16;
+
+ mutex_lock(&smn_mutex);
+
+ err = pci_write_config_dword(F4, 0x5C, ficaa);
+ if (err) {
+ pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
+ goto out_unlock;
+ }
+
+ err = pci_read_config_dword(F4, 0x98, lo);
+ if (err)
+ pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
+
+out_unlock:
+ mutex_unlock(&smn_mutex);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(amd_df_indirect_read);
+
int amd_cache_northbridges(void)
{
u16 i = 0;
struct amd_northbridge *nb;
- struct pci_dev *misc, *link;
+ struct pci_dev *root, *misc, *link;
- if (amd_nb_num())
+ if (amd_northbridges.num)
return 0;
misc = NULL;
@@ -74,15 +198,17 @@ int amd_cache_northbridges(void)
if (!i)
return -ENODEV;
- nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
+ nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)
return -ENOMEM;
amd_northbridges.nb = nb;
amd_northbridges.num = i;
- link = misc = NULL;
- for (i = 0; i != amd_nb_num(); i++) {
+ link = misc = root = NULL;
+ for (i = 0; i != amd_northbridges.num; i++) {
+ node_to_amd_nb(i)->root = root =
+ next_northbridge(root, amd_root_ids);
node_to_amd_nb(i)->misc = misc =
next_northbridge(misc, amd_nb_misc_ids);
node_to_amd_nb(i)->link = link =
@@ -139,13 +265,13 @@ struct resource *amd_get_mmconfig_range(struct resource *res)
{
u32 address;
u64 base, msr;
- unsigned segn_busn_bits;
+ unsigned int segn_busn_bits;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return NULL;
/* assume all cpus from fam10h have mmconfig */
- if (boot_cpu_data.x86 < 0x10)
+ if (boot_cpu_data.x86 < 0x10)
return NULL;
address = MSR_FAM10H_MMIO_CONF_BASE;
@@ -226,14 +352,14 @@ static void amd_cache_gart(void)
if (!amd_nb_has_feature(AMD_NB_GART))
return;
- flush_words = kmalloc(amd_nb_num() * sizeof(u32), GFP_KERNEL);
+ flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
if (!flush_words) {
amd_northbridges.flags &= ~AMD_NB_GART;
pr_notice("Cannot initialize GART flush words, GART support disabled\n");
return;
}
- for (i = 0; i != amd_nb_num(); i++)
+ for (i = 0; i != amd_northbridges.num; i++)
pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}
@@ -246,18 +372,20 @@ void amd_flush_garts(void)
if (!amd_nb_has_feature(AMD_NB_GART))
return;
- /* Avoid races between AGP and IOMMU. In theory it's not needed
- but I'm not sure if the hardware won't lose flush requests
- when another is pending. This whole thing is so expensive anyways
- that it doesn't matter to serialize more. -AK */
+ /*
+ * Avoid races between AGP and IOMMU. In theory it's not needed
+ * but I'm not sure if the hardware won't lose flush requests
+ * when another is pending. This whole thing is so expensive anyways
+ * that it doesn't matter to serialize more. -AK
+ */
spin_lock_irqsave(&gart_lock, flags);
flushed = 0;
- for (i = 0; i < amd_nb_num(); i++) {
+ for (i = 0; i < amd_northbridges.num; i++) {
pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
flush_words[i] | 1);
flushed++;
}
- for (i = 0; i < amd_nb_num(); i++) {
+ for (i = 0; i < amd_northbridges.num; i++) {
u32 w;
/* Make sure the hardware actually executed the flush*/
for (;;) {
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 88c657b057e2..2686894350a4 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2263,6 +2263,7 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
/* Should happen once for each apic */
WARN_ON((*drv)->eoi_write == eoi_write);
+ (*drv)->native_eoi_write = (*drv)->eoi_write;
(*drv)->eoi_write = eoi_write;
}
}
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 51287cd90bf6..643818a7688b 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -906,14 +906,14 @@ static int apm_cpu_idle(struct cpuidle_device *dev,
static int use_apm_idle; /* = 0 */
static unsigned int last_jiffies; /* = 0 */
static unsigned int last_stime; /* = 0 */
- cputime_t stime;
+ cputime_t stime, utime;
int apm_idle_done = 0;
unsigned int jiffies_since_last_check = jiffies - last_jiffies;
unsigned int bucket;
recalc:
- task_cputime(current, NULL, &stime);
+ task_cputime(current, &utime, &stime);
if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
use_apm_idle = 0;
} else if (jiffies_since_last_check > idle_period) {
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 4a8697f7d4ef..52000010c62e 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -20,13 +20,11 @@ obj-y := intel_cacheinfo.o scattered.o topology.o
obj-y += common.o
obj-y += rdrand.o
obj-y += match.o
+obj-y += bugs.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
-obj-$(CONFIG_X86_32) += bugs.o
-obj-$(CONFIG_X86_64) += bugs_64.o
-
obj-$(CONFIG_CPU_SUP_INTEL) += intel.o
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
@@ -34,6 +32,8 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
+obj-$(CONFIG_INTEL_RDT_A) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_schemata.o
+
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 1e81a37c034e..4daad1e39352 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -314,11 +314,30 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
smp_num_siblings = ((ebx >> 8) & 3) + 1;
c->x86_max_cores /= smp_num_siblings;
c->cpu_core_id = ebx & 0xff;
+
+ /*
+ * We may have multiple LLCs if L3 caches exist, so check if we
+ * have an L3 cache by looking at the L3 cache CPUID leaf.
+ */
+ if (cpuid_edx(0x80000006)) {
+ if (c->x86 == 0x17) {
+ /*
+ * LLC is at the core complex level.
+ * Core complex id is ApicId[3].
+ */
+ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+ } else {
+ /* LLC is at the node level. */
+ per_cpu(cpu_llc_id, cpu) = node_id;
+ }
+ }
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
rdmsrl(MSR_FAM10H_NODE_ID, value);
node_id = value & 7;
+
+ per_cpu(cpu_llc_id, cpu) = node_id;
} else
return;
@@ -329,9 +348,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
cus_per_node = c->x86_max_cores / nodes_per_socket;
- /* store NodeID, use llc_shared_map to store sibling info */
- per_cpu(cpu_llc_id, cpu) = node_id;
-
/* core id has to be in the [0 .. cores_per_node - 1] range */
c->cpu_core_id %= cus_per_node;
}
@@ -356,15 +372,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
/* use socket ID also for last level cache */
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
amd_get_topology(c);
-
- /*
- * Fix percpu cpu_llc_id here as LLC topology is different
- * for Fam17h systems.
- */
- if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
- return;
-
- per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
#endif
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index bd17db15a2c1..a44ef52184df 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -16,15 +16,19 @@
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
void __init check_bugs(void)
{
identify_boot_cpu();
-#ifndef CONFIG_SMP
- pr_info("CPU: ");
- print_cpu_info(&boot_cpu_data);
-#endif
+ if (!IS_ENABLED(CONFIG_SMP)) {
+ pr_info("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+ }
+
+#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
*
@@ -40,4 +44,18 @@ void __init check_bugs(void)
alternative_instructions();
fpu__init_check_bugs();
+#else /* CONFIG_X86_64 */
+ alternative_instructions();
+
+ /*
+ * Make sure the first 2MB area is not mapped by huge pages
+ * There are typically fixed size MTRRs in there and overlapping
+ * MTRRs into large pages causes slow downs.
+ *
+ * Right now we don't do that with gbpages because there seems
+ * very little benefit for that case.
+ */
+ if (!direct_gbpages)
+ set_memory_4k((unsigned long)__va(0), 1);
+#endif
}
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
deleted file mode 100644
index a972ac4c7e7d..000000000000
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 1994 Linus Torvalds
- * Copyright (C) 2000 SuSE
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/alternative.h>
-#include <asm/bugs.h>
-#include <asm/processor.h>
-#include <asm/mtrr.h>
-#include <asm/cacheflush.h>
-
-void __init check_bugs(void)
-{
- identify_boot_cpu();
-#if !defined(CONFIG_SMP)
- pr_info("CPU: ");
- print_cpu_info(&boot_cpu_data);
-#endif
- alternative_instructions();
-
- /*
- * Make sure the first 2MB area is not mapped by huge pages
- * There are typically fixed size MTRRs in there and overlapping
- * MTRRs into large pages causes slow downs.
- *
- * Right now we don't do that with gbpages because there seems
- * very little benefit for that case.
- */
- if (!direct_gbpages)
- set_memory_4k((unsigned long)__va(0), 1);
-}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cc9e980c68ec..90c007447507 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1190,51 +1190,6 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
mtrr_ap_init();
}
-struct msr_range {
- unsigned min;
- unsigned max;
-};
-
-static const struct msr_range msr_range_array[] = {
- { 0x00000000, 0x00000418},
- { 0xc0000000, 0xc000040b},
- { 0xc0010000, 0xc0010142},
- { 0xc0011000, 0xc001103b},
-};
-
-static void __print_cpu_msr(void)
-{
- unsigned index_min, index_max;
- unsigned index;
- u64 val;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
- index_min = msr_range_array[i].min;
- index_max = msr_range_array[i].max;
-
- for (index = index_min; index < index_max; index++) {
- if (rdmsrl_safe(index, &val))
- continue;
- pr_info(" MSR%08x: %016llx\n", index, val);
- }
- }
-}
-
-static int show_msr;
-
-static __init int setup_show_msr(char *arg)
-{
- int num;
-
- get_option(&arg, &num);
-
- if (num > 0)
- show_msr = num;
- return 1;
-}
-__setup("show_msr=", setup_show_msr);
-
static __init int setup_noclflush(char *arg)
{
setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
@@ -1268,14 +1223,6 @@ void print_cpu_info(struct cpuinfo_x86 *c)
pr_cont(", stepping: 0x%x)\n", c->x86_mask);
else
pr_cont(")\n");
-
- print_cpu_msr(c);
-}
-
-void print_cpu_msr(struct cpuinfo_x86 *c)
-{
- if (c->cpu_index < show_msr)
- __print_cpu_msr();
}
static __init int setup_disablecpuid(char *arg)
@@ -1490,11 +1437,8 @@ void cpu_init(void)
*/
cr4_init_shadow();
- /*
- * Load microcode on this cpu if a valid microcode is available.
- * This is early microcode loading procedure.
- */
- load_ucode_ap();
+ if (cpu)
+ load_ucode_ap();
t = &per_cpu(cpu_tss, cpu);
oist = &per_cpu(orig_ist, cpu);
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index be6337156502..0282b0df004a 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -153,6 +153,7 @@ struct _cpuid4_info_regs {
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
union _cpuid4_leaf_ecx ecx;
+ unsigned int id;
unsigned long size;
struct amd_northbridge *nb;
};
@@ -894,6 +895,8 @@ static void __cache_cpumap_setup(unsigned int cpu, int index,
static void ci_leaf_init(struct cacheinfo *this_leaf,
struct _cpuid4_info_regs *base)
{
+ this_leaf->id = base->id;
+ this_leaf->attributes = CACHE_ID;
this_leaf->level = base->eax.split.level;
this_leaf->type = cache_type_map[base->eax.split.type];
this_leaf->coherency_line_size =
@@ -920,6 +923,22 @@ static int __init_cache_level(unsigned int cpu)
return 0;
}
+/*
+ * The maximum number of threads sharing the cache comes from
+ * CPUID.4:EAX[25-14], with ECX as the cache index. Right shift the apicid by
+ * the order of that number to get the cache id for this cache node.
+ */
+static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
+{
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned long num_threads_sharing;
+ int index_msb;
+
+ num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
+ index_msb = get_count_order(num_threads_sharing);
+ id4_regs->id = c->apicid >> index_msb;
+}
+
static int __populate_cache_leaves(unsigned int cpu)
{
unsigned int idx, ret;
@@ -931,6 +950,7 @@ static int __populate_cache_leaves(unsigned int cpu)
ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
if (ret)
return ret;
+ get_cache_id(cpu, &id4_regs);
ci_leaf_init(this_leaf++, &id4_regs);
__cache_cpumap_setup(cpu, idx, &id4_regs);
}
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
new file mode 100644
index 000000000000..5a533fefefa0
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -0,0 +1,403 @@
+/*
+ * Resource Director Technology(RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Tony Luck <tony.luck@intel.com>
+ * Vikas Shivappa <vikas.shivappa@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual, June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpuhotplug.h>
+
+#include <asm/intel-family.h>
+#include <asm/intel_rdt.h>
+
+/* Mutex to protect rdtgroup access. */
+DEFINE_MUTEX(rdtgroup_mutex);
+
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_closid);
+
+#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
+
+struct rdt_resource rdt_resources_all[] = {
+ {
+ .name = "L3",
+ .domains = domain_init(RDT_RESOURCE_L3),
+ .msr_base = IA32_L3_CBM_BASE,
+ .min_cbm_bits = 1,
+ .cache_level = 3,
+ .cbm_idx_multi = 1,
+ .cbm_idx_offset = 0
+ },
+ {
+ .name = "L3DATA",
+ .domains = domain_init(RDT_RESOURCE_L3DATA),
+ .msr_base = IA32_L3_CBM_BASE,
+ .min_cbm_bits = 1,
+ .cache_level = 3,
+ .cbm_idx_multi = 2,
+ .cbm_idx_offset = 0
+ },
+ {
+ .name = "L3CODE",
+ .domains = domain_init(RDT_RESOURCE_L3CODE),
+ .msr_base = IA32_L3_CBM_BASE,
+ .min_cbm_bits = 1,
+ .cache_level = 3,
+ .cbm_idx_multi = 2,
+ .cbm_idx_offset = 1
+ },
+ {
+ .name = "L2",
+ .domains = domain_init(RDT_RESOURCE_L2),
+ .msr_base = IA32_L2_CBM_BASE,
+ .min_cbm_bits = 1,
+ .cache_level = 2,
+ .cbm_idx_multi = 1,
+ .cbm_idx_offset = 0
+ },
+};
+
+static int cbm_idx(struct rdt_resource *r, int closid)
+{
+ return closid * r->cbm_idx_multi + r->cbm_idx_offset;
+}
+
+/*
+ * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
+ * as they do not have CPUID enumeration support for Cache allocation.
+ * The check for Vendor/Family/Model is not enough to guarantee that
+ * the MSRs won't #GP fault because only the following SKUs support
+ * CAT:
+ * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
+ * Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
+ * Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
+ * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
+ * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
+ *
+ * Probe by trying to write the first of the L3 cache mask registers
+ * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
+ * is always 20 on hsw server parts. The minimum cache bitmask length
+ * allowed for HSW server is always 2 bits. Hardcode all of them.
+ */
+static inline bool cache_alloc_hsw_probe(void)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == INTEL_FAM6_HASWELL_X) {
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+ u32 l, h, max_cbm = BIT_MASK(20) - 1;
+
+ if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
+ return false;
+ rdmsr(IA32_L3_CBM_BASE, l, h);
+
+ /* If all the bits were set in MSR, return success */
+ if (l != max_cbm)
+ return false;
+
+ r->num_closid = 4;
+ r->cbm_len = 20;
+ r->max_cbm = max_cbm;
+ r->min_cbm_bits = 2;
+ r->capable = true;
+ r->enabled = true;
+
+ return true;
+ }
+
+ return false;
+}
+
+static void rdt_get_config(int idx, struct rdt_resource *r)
+{
+ union cpuid_0x10_1_eax eax;
+ union cpuid_0x10_1_edx edx;
+ u32 ebx, ecx;
+
+ cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
+ r->num_closid = edx.split.cos_max + 1;
+ r->cbm_len = eax.split.cbm_len + 1;
+ r->max_cbm = BIT_MASK(eax.split.cbm_len + 1) - 1;
+ r->capable = true;
+ r->enabled = true;
+}
+
+static void rdt_get_cdp_l3_config(int type)
+{
+ struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+ struct rdt_resource *r = &rdt_resources_all[type];
+
+ r->num_closid = r_l3->num_closid / 2;
+ r->cbm_len = r_l3->cbm_len;
+ r->max_cbm = r_l3->max_cbm;
+ r->capable = true;
+ /*
+ * By default, CDP is disabled. CDP can be enabled with the "cdp" mount
+ * parameter when the resctrl file system is mounted.
+ */
+ r->enabled = false;
+}
+
+static inline bool get_rdt_resources(void)
+{
+ bool ret = false;
+
+ if (cache_alloc_hsw_probe())
+ return true;
+
+ if (!boot_cpu_has(X86_FEATURE_RDT_A))
+ return false;
+
+ if (boot_cpu_has(X86_FEATURE_CAT_L3)) {
+ rdt_get_config(1, &rdt_resources_all[RDT_RESOURCE_L3]);
+ if (boot_cpu_has(X86_FEATURE_CDP_L3)) {
+ rdt_get_cdp_l3_config(RDT_RESOURCE_L3DATA);
+ rdt_get_cdp_l3_config(RDT_RESOURCE_L3CODE);
+ }
+ ret = true;
+ }
+ if (boot_cpu_has(X86_FEATURE_CAT_L2)) {
+ /* CPUID 0x10.2 fields are the same format as 0x10.1 */
+ rdt_get_config(2, &rdt_resources_all[RDT_RESOURCE_L2]);
+ ret = true;
+ }
+
+ return ret;
+}
+
+static int get_cache_id(int cpu, int level)
+{
+ struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
+ int i;
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == level)
+ return ci->info_list[i].id;
+ }
+
+ return -1;
+}
+
+void rdt_cbm_update(void *arg)
+{
+ struct msr_param *m = (struct msr_param *)arg;
+ struct rdt_resource *r = m->res;
+ int i, cpu = smp_processor_id();
+ struct rdt_domain *d;
+
+ list_for_each_entry(d, &r->domains, list) {
+ /* Find the domain that contains this CPU */
+ if (cpumask_test_cpu(cpu, &d->cpu_mask))
+ goto found;
+ }
+ pr_info_once("cpu %d not found in any domain for resource %s\n",
+ cpu, r->name);
+
+ return;
+
+found:
+ for (i = m->low; i < m->high; i++) {
+ int idx = cbm_idx(r, i);
+
+ wrmsrl(r->msr_base + idx, d->cbm[i]);
+ }
+}
+
+/*
+ * rdt_find_domain - Find a domain in a resource that matches input resource id
+ *
+ * Search resource r's domain list to find the resource id. If the resource
+ * id is found in a domain, return the domain. Otherwise, if requested by
+ * caller, return the first domain whose id is bigger than the input id.
+ * The domain list is sorted by id in ascending order.
+ */
+static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
+ struct list_head **pos)
+{
+ struct rdt_domain *d;
+ struct list_head *l;
+
+ if (id < 0)
+ return ERR_PTR(id);
+
+ list_for_each(l, &r->domains) {
+ d = list_entry(l, struct rdt_domain, list);
+ /* When id is found, return its domain. */
+ if (id == d->id)
+ return d;
+ /* Stop searching when finding id's position in sorted list. */
+ if (id < d->id)
+ break;
+ }
+
+ if (pos)
+ *pos = l;
+
+ return NULL;
+}
+
+/*
+ * domain_add_cpu - Add a cpu to a resource's domain list.
+ *
+ * If an existing domain in the resource r's domain list matches the cpu's
+ * resource id, add the cpu in the domain.
+ *
+ * Otherwise, a new domain is allocated and inserted into the right position
+ * in the domain list sorted by id in ascending order.
+ *
+ * The order in the domain list is visible to users when we print entries
+ * in the schemata file and schemata input is validated to have the same order
+ * as this list.
+ */
+static void domain_add_cpu(int cpu, struct rdt_resource *r)
+{
+ int i, id = get_cache_id(cpu, r->cache_level);
+ struct list_head *add_pos = NULL;
+ struct rdt_domain *d;
+
+ d = rdt_find_domain(r, id, &add_pos);
+ if (IS_ERR(d)) {
+ pr_warn("Could't find cache id for cpu %d\n", cpu);
+ return;
+ }
+
+ if (d) {
+ cpumask_set_cpu(cpu, &d->cpu_mask);
+ return;
+ }
+
+ d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
+ if (!d)
+ return;
+
+ d->id = id;
+
+ d->cbm = kmalloc_array(r->num_closid, sizeof(*d->cbm), GFP_KERNEL);
+ if (!d->cbm) {
+ kfree(d);
+ return;
+ }
+
+ for (i = 0; i < r->num_closid; i++) {
+ int idx = cbm_idx(r, i);
+
+ d->cbm[i] = r->max_cbm;
+ wrmsrl(r->msr_base + idx, d->cbm[i]);
+ }
+
+ cpumask_set_cpu(cpu, &d->cpu_mask);
+ list_add_tail(&d->list, add_pos);
+ r->num_domains++;
+}
+
+static void domain_remove_cpu(int cpu, struct rdt_resource *r)
+{
+ int id = get_cache_id(cpu, r->cache_level);
+ struct rdt_domain *d;
+
+ d = rdt_find_domain(r, id, NULL);
+ if (IS_ERR_OR_NULL(d)) {
+ pr_warn("Could't find cache id for cpu %d\n", cpu);
+ return;
+ }
+
+ cpumask_clear_cpu(cpu, &d->cpu_mask);
+ if (cpumask_empty(&d->cpu_mask)) {
+ r->num_domains--;
+ kfree(d->cbm);
+ list_del(&d->list);
+ kfree(d);
+ }
+}
+
+static void clear_closid(int cpu)
+{
+ struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+
+ per_cpu(cpu_closid, cpu) = 0;
+ state->closid = 0;
+ wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, 0);
+}
+
+static int intel_rdt_online_cpu(unsigned int cpu)
+{
+ struct rdt_resource *r;
+
+ mutex_lock(&rdtgroup_mutex);
+ for_each_capable_rdt_resource(r)
+ domain_add_cpu(cpu, r);
+ /* The cpu is set in default rdtgroup after online. */
+ cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
+ clear_closid(cpu);
+ mutex_unlock(&rdtgroup_mutex);
+
+ return 0;
+}
+
+static int intel_rdt_offline_cpu(unsigned int cpu)
+{
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+
+ mutex_lock(&rdtgroup_mutex);
+ for_each_capable_rdt_resource(r)
+ domain_remove_cpu(cpu, r);
+ list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
+ if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask))
+ break;
+ }
+ clear_closid(cpu);
+ mutex_unlock(&rdtgroup_mutex);
+
+ return 0;
+}
+
+static int __init intel_rdt_late_init(void)
+{
+ struct rdt_resource *r;
+ int state, ret;
+
+ if (!get_rdt_resources())
+ return -ENODEV;
+
+ state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "x86/rdt/cat:online:",
+ intel_rdt_online_cpu, intel_rdt_offline_cpu);
+ if (state < 0)
+ return state;
+
+ ret = rdtgroup_init();
+ if (ret) {
+ cpuhp_remove_state(state);
+ return ret;
+ }
+
+ for_each_capable_rdt_resource(r)
+ pr_info("Intel RDT %s allocation detected\n", r->name);
+
+ return 0;
+}
+
+late_initcall(intel_rdt_late_init);
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
new file mode 100644
index 000000000000..1afd3f393501
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -0,0 +1,1107 @@
+/*
+ * User interface for Resource Allocation in Resource Director Technology (RDT)
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Fenghua Yu <fenghua.yu@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/cpu.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/task_work.h>
+
+#include <uapi/linux/magic.h>
+
+#include <asm/intel_rdt.h>
+#include <asm/intel_rdt_common.h>
+
+DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
+struct kernfs_root *rdt_root;
+struct rdtgroup rdtgroup_default;
+LIST_HEAD(rdt_all_groups);
+
+/* Kernel fs node for "info" directory under root */
+static struct kernfs_node *kn_info;
+
+/*
+ * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
+ * we can keep a bitmap of free CLOSIDs in a single integer.
+ *
+ * Using a global CLOSID across all resources has some advantages and
+ * some drawbacks:
+ * + We can simply set "current->closid" to assign a task to a resource
+ * group.
+ * + Context switch code can avoid extra memory references deciding which
+ * CLOSID to load into the PQR_ASSOC MSR
+ * - We give up some options in configuring resource groups across multi-socket
+ * systems.
+ * - Our choices on how to configure each resource become progressively more
+ * limited as the number of resources grows.
+ */
+static int closid_free_map;
+
+static void closid_init(void)
+{
+ struct rdt_resource *r;
+ int rdt_min_closid = 32;
+
+ /* Compute rdt_min_closid across all resources */
+ for_each_enabled_rdt_resource(r)
+ rdt_min_closid = min(rdt_min_closid, r->num_closid);
+
+ closid_free_map = BIT_MASK(rdt_min_closid) - 1;
+
+ /* CLOSID 0 is always reserved for the default group */
+ closid_free_map &= ~1;
+}
+
+int closid_alloc(void)
+{
+ int closid = ffs(closid_free_map);
+
+ if (closid == 0)
+ return -ENOSPC;
+ closid--;
+ closid_free_map &= ~(1 << closid);
+
+ return closid;
+}
+
+static void closid_free(int closid)
+{
+ closid_free_map |= 1 << closid;
+}
+
+/* set uid and gid of rdtgroup dirs and files to that of the creator */
+static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
+{
+ struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
+ .ia_uid = current_fsuid(),
+ .ia_gid = current_fsgid(), };
+
+ if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
+ gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
+ return 0;
+
+ return kernfs_setattr(kn, &iattr);
+}
+
+static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
+{
+ struct kernfs_node *kn;
+ int ret;
+
+ kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
+ 0, rft->kf_ops, rft, NULL, NULL);
+ if (IS_ERR(kn))
+ return PTR_ERR(kn);
+
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret) {
+ kernfs_remove(kn);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rdtgroup_add_files(struct kernfs_node *kn, struct rftype *rfts,
+ int len)
+{
+ struct rftype *rft;
+ int ret;
+
+ lockdep_assert_held(&rdtgroup_mutex);
+
+ for (rft = rfts; rft < rfts + len; rft++) {
+ ret = rdtgroup_add_file(kn, rft);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+error:
+ pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
+ while (--rft >= rfts)
+ kernfs_remove_by_name(kn, rft->name);
+ return ret;
+}
+
+static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
+{
+ struct kernfs_open_file *of = m->private;
+ struct rftype *rft = of->kn->priv;
+
+ if (rft->seq_show)
+ return rft->seq_show(of, m, arg);
+ return 0;
+}
+
+static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, loff_t off)
+{
+ struct rftype *rft = of->kn->priv;
+
+ if (rft->write)
+ return rft->write(of, buf, nbytes, off);
+
+ return -EINVAL;
+}
+
+static struct kernfs_ops rdtgroup_kf_single_ops = {
+ .atomic_write_len = PAGE_SIZE,
+ .write = rdtgroup_file_write,
+ .seq_show = rdtgroup_seqfile_show,
+};
+
+static int rdtgroup_cpus_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ int ret = 0;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+ if (rdtgrp)
+ seq_printf(s, "%*pb\n", cpumask_pr_args(&rdtgrp->cpu_mask));
+ else
+ ret = -ENOENT;
+ rdtgroup_kn_unlock(of->kn);
+
+ return ret;
+}
+
+/*
+ * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * because __switch_to() is executed with interrupts disabled. A local call
+ * from rdt_update_closid() is protected against __switch_to() because
+ * preemption is disabled.
+ */
+static void rdt_update_cpu_closid(void *closid)
+{
+ if (closid)
+ this_cpu_write(cpu_closid, *(int *)closid);
+ /*
+ * We cannot unconditionally write the MSR because the current
+ * executing task might have its own closid selected. Just reuse
+ * the context switch code.
+ */
+ intel_rdt_sched_in();
+}
+
+/*
+ * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
+ *
+ * Per task closids must have been set up before calling this function.
+ *
+ * The per cpu closids are updated with the smp function call, when @closid
+ * is not NULL. If @closid is NULL then all affected percpu closids must
+ * have been set up before calling this function.
+ */
+static void
+rdt_update_closid(const struct cpumask *cpu_mask, int *closid)
+{
+ int cpu = get_cpu();
+
+ if (cpumask_test_cpu(cpu, cpu_mask))
+ rdt_update_cpu_closid(closid);
+ smp_call_function_many(cpu_mask, rdt_update_cpu_closid, closid, 1);
+ put_cpu();
+}
+
+static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ cpumask_var_t tmpmask, newmask;
+ struct rdtgroup *rdtgrp, *r;
+ int ret;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
+ if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
+ free_cpumask_var(tmpmask);
+ return -ENOMEM;
+ }
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ ret = cpumask_parse(buf, newmask);
+ if (ret)
+ goto unlock;
+
+ /* check that user didn't specify any offline cpus */
+ cpumask_andnot(tmpmask, newmask, cpu_online_mask);
+ if (cpumask_weight(tmpmask)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /* Check whether cpus are dropped from this group */
+ cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
+ if (cpumask_weight(tmpmask)) {
+ /* Can't drop from default group */
+ if (rdtgrp == &rdtgroup_default) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ /* Give any dropped cpus to rdtgroup_default */
+ cpumask_or(&rdtgroup_default.cpu_mask,
+ &rdtgroup_default.cpu_mask, tmpmask);
+ rdt_update_closid(tmpmask, &rdtgroup_default.closid);
+ }
+
+ /*
+ * If we added cpus, remove them from previous group that owned them
+ * and update per-cpu closid
+ */
+ cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
+ if (cpumask_weight(tmpmask)) {
+ list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
+ if (r == rdtgrp)
+ continue;
+ cpumask_andnot(&r->cpu_mask, &r->cpu_mask, tmpmask);
+ }
+ rdt_update_closid(tmpmask, &rdtgrp->closid);
+ }
+
+ /* Done pushing/pulling - update this group with new mask */
+ cpumask_copy(&rdtgrp->cpu_mask, newmask);
+
+unlock:
+ rdtgroup_kn_unlock(of->kn);
+ free_cpumask_var(tmpmask);
+ free_cpumask_var(newmask);
+
+ return ret ?: nbytes;
+}
+
+struct task_move_callback {
+ struct callback_head work;
+ struct rdtgroup *rdtgrp;
+};
+
+static void move_myself(struct callback_head *head)
+{
+ struct task_move_callback *callback;
+ struct rdtgroup *rdtgrp;
+
+ callback = container_of(head, struct task_move_callback, work);
+ rdtgrp = callback->rdtgrp;
+
+ /*
+ * If resource group was deleted before this task work callback
+ * was invoked, then assign the task to root group and free the
+ * resource group.
+ */
+ if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+ (rdtgrp->flags & RDT_DELETED)) {
+ current->closid = 0;
+ kfree(rdtgrp);
+ }
+
+ preempt_disable();
+ /* update PQR_ASSOC MSR to make resource group go into effect */
+ intel_rdt_sched_in();
+ preempt_enable();
+
+ kfree(callback);
+}
+
+static int __rdtgroup_move_task(struct task_struct *tsk,
+ struct rdtgroup *rdtgrp)
+{
+ struct task_move_callback *callback;
+ int ret;
+
+ callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+ if (!callback)
+ return -ENOMEM;
+ callback->work.func = move_myself;
+ callback->rdtgrp = rdtgrp;
+
+ /*
+ * Take a refcount, so rdtgrp cannot be freed before the
+ * callback has been invoked.
+ */
+ atomic_inc(&rdtgrp->waitcount);
+ ret = task_work_add(tsk, &callback->work, true);
+ if (ret) {
+ /*
+ * Task is exiting. Drop the refcount and free the callback.
+ * No need to check the refcount as the group cannot be
+ * deleted before the write function unlocks rdtgroup_mutex.
+ */
+ atomic_dec(&rdtgrp->waitcount);
+ kfree(callback);
+ } else {
+ tsk->closid = rdtgrp->closid;
+ }
+ return ret;
+}
+
+static int rdtgroup_task_write_permission(struct task_struct *task,
+ struct kernfs_open_file *of)
+{
+ const struct cred *tcred = get_task_cred(task);
+ const struct cred *cred = current_cred();
+ int ret = 0;
+
+ /*
+ * Even if we're attaching all tasks in the thread group, we only
+ * need to check permissions on one of them.
+ */
+ if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
+ !uid_eq(cred->euid, tcred->uid) &&
+ !uid_eq(cred->euid, tcred->suid))
+ ret = -EPERM;
+
+ put_cred(tcred);
+ return ret;
+}
+
+static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
+ struct kernfs_open_file *of)
+{
+ struct task_struct *tsk;
+ int ret;
+
+ rcu_read_lock();
+ if (pid) {
+ tsk = find_task_by_vpid(pid);
+ if (!tsk) {
+ rcu_read_unlock();
+ return -ESRCH;
+ }
+ } else {
+ tsk = current;
+ }
+
+ get_task_struct(tsk);
+ rcu_read_unlock();
+
+ ret = rdtgroup_task_write_permission(tsk, of);
+ if (!ret)
+ ret = __rdtgroup_move_task(tsk, rdtgrp);
+
+ put_task_struct(tsk);
+ return ret;
+}
+
+static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct rdtgroup *rdtgrp;
+ int ret = 0;
+ pid_t pid;
+
+ if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+ return -EINVAL;
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+ if (rdtgrp)
+ ret = rdtgroup_move_task(pid, rdtgrp, of);
+ else
+ ret = -ENOENT;
+
+ rdtgroup_kn_unlock(of->kn);
+
+ return ret ?: nbytes;
+}
+
+static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
+{
+ struct task_struct *p, *t;
+
+ rcu_read_lock();
+ for_each_process_thread(p, t) {
+ if (t->closid == r->closid)
+ seq_printf(s, "%d\n", t->pid);
+ }
+ rcu_read_unlock();
+}
+
+static int rdtgroup_tasks_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ int ret = 0;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (rdtgrp)
+ show_rdt_tasks(rdtgrp, s);
+ else
+ ret = -ENOENT;
+ rdtgroup_kn_unlock(of->kn);
+
+ return ret;
+}
+
+/* Files in each rdtgroup */
+static struct rftype rdtgroup_base_files[] = {
+ {
+ .name = "cpus",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_cpus_write,
+ .seq_show = rdtgroup_cpus_show,
+ },
+ {
+ .name = "tasks",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_tasks_write,
+ .seq_show = rdtgroup_tasks_show,
+ },
+ {
+ .name = "schemata",
+ .mode = 0644,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .write = rdtgroup_schemata_write,
+ .seq_show = rdtgroup_schemata_show,
+ },
+};
+
+static int rdt_num_closids_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ seq_printf(seq, "%d\n", r->num_closid);
+
+ return 0;
+}
+
+static int rdt_cbm_mask_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ seq_printf(seq, "%x\n", r->max_cbm);
+
+ return 0;
+}
+
+static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ seq_printf(seq, "%d\n", r->min_cbm_bits);
+
+ return 0;
+}
+
+/* rdtgroup information files for one cache resource. */
+static struct rftype res_info_files[] = {
+ {
+ .name = "num_closids",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_num_closids_show,
+ },
+ {
+ .name = "cbm_mask",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_cbm_mask_show,
+ },
+ {
+ .name = "min_cbm_bits",
+ .mode = 0444,
+ .kf_ops = &rdtgroup_kf_single_ops,
+ .seq_show = rdt_min_cbm_bits_show,
+ },
+};
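
These info files are plain text and trivially readable from user space. A small sketch, again assuming the conventional /sys/fs/resctrl mount point and an "L3" subdirectory under info/ (which subdirectories exist depends on the enabled resources):

#include <stdio.h>

static void read_info(const char *file)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/resctrl/info/L3/%s", file);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", file, buf);
	fclose(f);
}

int main(void)
{
	read_info("num_closids");
	read_info("cbm_mask");
	read_info("min_cbm_bits");
	return 0;
}
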
+
+static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
+{
+ struct kernfs_node *kn_subdir;
+ struct rdt_resource *r;
+ int ret;
+
+ /* create the directory */
+ kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
+ if (IS_ERR(kn_info))
+ return PTR_ERR(kn_info);
+ kernfs_get(kn_info);
+
+ for_each_enabled_rdt_resource(r) {
+ kn_subdir = kernfs_create_dir(kn_info, r->name,
+ kn_info->mode, r);
+ if (IS_ERR(kn_subdir)) {
+ ret = PTR_ERR(kn_subdir);
+ goto out_destroy;
+ }
+ kernfs_get(kn_subdir);
+ ret = rdtgroup_kn_set_ugid(kn_subdir);
+ if (ret)
+ goto out_destroy;
+ ret = rdtgroup_add_files(kn_subdir, res_info_files,
+ ARRAY_SIZE(res_info_files));
+ if (ret)
+ goto out_destroy;
+ kernfs_activate(kn_subdir);
+ }
+
+ /*
+ * This extra ref will be put in kernfs_remove() and guarantees
+ * that @kn_info is always accessible.
+ */
+ kernfs_get(kn_info);
+
+ ret = rdtgroup_kn_set_ugid(kn_info);
+ if (ret)
+ goto out_destroy;
+
+ kernfs_activate(kn_info);
+
+ return 0;
+
+out_destroy:
+ kernfs_remove(kn_info);
+ return ret;
+}
+
+static void l3_qos_cfg_update(void *arg)
+{
+ bool *enable = arg;
+
+ wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
+}
+
+static int set_l3_qos_cfg(struct rdt_resource *r, bool enable)
+{
+ cpumask_var_t cpu_mask;
+ struct rdt_domain *d;
+ int cpu;
+
+ if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ list_for_each_entry(d, &r->domains, list) {
+ /* Pick one CPU from each domain instance to update MSR */
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+ }
+ cpu = get_cpu();
+ /* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
+ if (cpumask_test_cpu(cpu, cpu_mask))
+ l3_qos_cfg_update(&enable);
+ /* Update QOS_CFG MSR on all other cpus in cpu_mask. */
+ smp_call_function_many(cpu_mask, l3_qos_cfg_update, &enable, 1);
+ put_cpu();
+
+ free_cpumask_var(cpu_mask);
+
+ return 0;
+}
+
+static int cdp_enable(void)
+{
+ struct rdt_resource *r_l3data = &rdt_resources_all[RDT_RESOURCE_L3DATA];
+ struct rdt_resource *r_l3code = &rdt_resources_all[RDT_RESOURCE_L3CODE];
+ struct rdt_resource *r_l3 = &rdt_resources_all[RDT_RESOURCE_L3];
+ int ret;
+
+ if (!r_l3->capable || !r_l3data->capable || !r_l3code->capable)
+ return -EINVAL;
+
+ ret = set_l3_qos_cfg(r_l3, true);
+ if (!ret) {
+ r_l3->enabled = false;
+ r_l3data->enabled = true;
+ r_l3code->enabled = true;
+ }
+ return ret;
+}
+
+static void cdp_disable(void)
+{
+ struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
+
+ r->enabled = r->capable;
+
+ if (rdt_resources_all[RDT_RESOURCE_L3DATA].enabled) {
+ rdt_resources_all[RDT_RESOURCE_L3DATA].enabled = false;
+ rdt_resources_all[RDT_RESOURCE_L3CODE].enabled = false;
+ set_l3_qos_cfg(r, false);
+ }
+}
+
+static int parse_rdtgroupfs_options(char *data)
+{
+ char *token, *o = data;
+ int ret = 0;
+
+ while ((token = strsep(&o, ",")) != NULL) {
+ if (!*token)
+ return -EINVAL;
+
+ if (!strcmp(token, "cdp"))
+ ret = cdp_enable();
+ }
+
+ return ret;
+}
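
For reference, "cdp" is the only mount option recognized here. A hedged sketch of passing it via mount(2) from user space (the mount point is the conventional one, not something this code mandates):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Equivalent to: mount -t resctrl -o cdp resctrl /sys/fs/resctrl */
	if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, "cdp")) {
		perror("mount");
		return 1;
	}
	return 0;
}
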
+
+/*
+ * We don't allow rdtgroup directories to be created anywhere
+ * except the root directory. Thus when looking for the rdtgroup
+ * structure for a kernfs node we are either looking at a directory,
+ * in which case the rdtgroup structure is pointed at by the "priv"
+ * field, or at a file, in which case we need only look to the parent
+ * to find the rdtgroup.
+ */
+static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
+{
+ if (kernfs_type(kn) == KERNFS_DIR) {
+ /*
+ * All the resource directories use "kn->priv"
+ * to point to the "struct rdtgroup" for the
+ * resource. "info" and its subdirectories don't
+ * have rdtgroup structures, so return NULL here.
+ */
+ if (kn == kn_info || kn->parent == kn_info)
+ return NULL;
+ else
+ return kn->priv;
+ } else {
+ return kn->parent->priv;
+ }
+}
+
+struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
+{
+ struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
+
+ if (!rdtgrp)
+ return NULL;
+
+ atomic_inc(&rdtgrp->waitcount);
+ kernfs_break_active_protection(kn);
+
+ mutex_lock(&rdtgroup_mutex);
+
+ /* Was this group deleted while we waited? */
+ if (rdtgrp->flags & RDT_DELETED)
+ return NULL;
+
+ return rdtgrp;
+}
+
+void rdtgroup_kn_unlock(struct kernfs_node *kn)
+{
+ struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);
+
+ if (!rdtgrp)
+ return;
+
+ mutex_unlock(&rdtgroup_mutex);
+
+ if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+ (rdtgrp->flags & RDT_DELETED)) {
+ kernfs_unbreak_active_protection(kn);
+ kernfs_put(kn);
+ kfree(rdtgrp);
+ } else {
+ kernfs_unbreak_active_protection(kn);
+ }
+}
+
+static struct dentry *rdt_mount(struct file_system_type *fs_type,
+ int flags, const char *unused_dev_name,
+ void *data)
+{
+ struct dentry *dentry;
+ int ret;
+
+ mutex_lock(&rdtgroup_mutex);
+ /*
+ * resctrl file system can only be mounted once.
+ */
+ if (static_branch_unlikely(&rdt_enable_key)) {
+ dentry = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ ret = parse_rdtgroupfs_options(data);
+ if (ret) {
+ dentry = ERR_PTR(ret);
+ goto out_cdp;
+ }
+
+ closid_init();
+
+ ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
+ if (ret) {
+ dentry = ERR_PTR(ret);
+ goto out_cdp;
+ }
+
+ dentry = kernfs_mount(fs_type, flags, rdt_root,
+ RDTGROUP_SUPER_MAGIC, NULL);
+ if (IS_ERR(dentry))
+ goto out_cdp;
+
+ static_branch_enable(&rdt_enable_key);
+ goto out;
+
+out_cdp:
+ cdp_disable();
+out:
+ mutex_unlock(&rdtgroup_mutex);
+
+ return dentry;
+}
+
+static int reset_all_cbms(struct rdt_resource *r)
+{
+ struct msr_param msr_param;
+ cpumask_var_t cpu_mask;
+ struct rdt_domain *d;
+ int i, cpu;
+
+ if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ msr_param.res = r;
+ msr_param.low = 0;
+ msr_param.high = r->num_closid;
+
+ /*
+ * Disable resource control for this resource by setting all
+ * CBMs in all domains to the maximum mask value. Pick one CPU
+ * from each domain to update the MSRs below.
+ */
+ list_for_each_entry(d, &r->domains, list) {
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+
+ for (i = 0; i < r->num_closid; i++)
+ d->cbm[i] = r->max_cbm;
+ }
+ cpu = get_cpu();
+ /* Update CBM on this cpu if it's in cpu_mask. */
+ if (cpumask_test_cpu(cpu, cpu_mask))
+ rdt_cbm_update(&msr_param);
+ /* Update CBM on all other cpus in cpu_mask. */
+ smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+ put_cpu();
+
+ free_cpumask_var(cpu_mask);
+
+ return 0;
+}
+
+/*
+ * Move tasks from one group to the other. If @from is NULL, then all tasks
+ * in the system are moved unconditionally (used for teardown).
+ *
+ * If @mask is not NULL the cpus on which moved tasks are running are set
+ * in that mask so the update smp function call is restricted to affected
+ * cpus.
+ */
+static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
+ struct cpumask *mask)
+{
+ struct task_struct *p, *t;
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(p, t) {
+ if (!from || t->closid == from->closid) {
+ t->closid = to->closid;
+#ifdef CONFIG_SMP
+ /*
+ * This is safe on x86 w/o barriers as the ordering
+ * of writing to task_cpu() and t->on_cpu is
+ * reverse to the reading here. The detection is
+ * inaccurate as tasks might move or schedule
+ * before the smp function call takes place. In
+ * such a case the function call is pointless, but
+ * there is no other side effect.
+ */
+ if (mask && t->on_cpu)
+ cpumask_set_cpu(task_cpu(t), mask);
+#endif
+ }
+ }
+ read_unlock(&tasklist_lock);
+}
+
+/*
+ * Forcibly remove all of subdirectories under root.
+ */
+static void rmdir_all_sub(void)
+{
+ struct rdtgroup *rdtgrp, *tmp;
+
+ /* Move all tasks to the default resource group */
+ rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);
+
+ list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
+ /* Remove each rdtgroup other than root */
+ if (rdtgrp == &rdtgroup_default)
+ continue;
+
+ /*
+ * Give any CPUs back to the default group. We cannot copy
+ * cpu_online_mask because a CPU might have executed the
+ * offline callback already, but is still marked online.
+ */
+ cpumask_or(&rdtgroup_default.cpu_mask,
+ &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+
+ kernfs_remove(rdtgrp->kn);
+ list_del(&rdtgrp->rdtgroup_list);
+ kfree(rdtgrp);
+ }
+ /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
+ get_online_cpus();
+ rdt_update_closid(cpu_online_mask, &rdtgroup_default.closid);
+ put_online_cpus();
+
+ kernfs_remove(kn_info);
+}
+
+static void rdt_kill_sb(struct super_block *sb)
+{
+ struct rdt_resource *r;
+
+ mutex_lock(&rdtgroup_mutex);
+
+ /* Put everything back to default values. */
+ for_each_enabled_rdt_resource(r)
+ reset_all_cbms(r);
+ cdp_disable();
+ rmdir_all_sub();
+ static_branch_disable(&rdt_enable_key);
+ kernfs_kill_sb(sb);
+ mutex_unlock(&rdtgroup_mutex);
+}
+
+static struct file_system_type rdt_fs_type = {
+ .name = "resctrl",
+ .mount = rdt_mount,
+ .kill_sb = rdt_kill_sb,
+};
+
+static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
+ umode_t mode)
+{
+ struct rdtgroup *parent, *rdtgrp;
+ struct kernfs_node *kn;
+ int ret, closid;
+
+ /* Only allow mkdir in the root directory */
+ if (parent_kn != rdtgroup_default.kn)
+ return -EPERM;
+
+ /* Do not accept '\n' to avoid an unparsable situation. */
+ if (strchr(name, '\n'))
+ return -EINVAL;
+
+ parent = rdtgroup_kn_lock_live(parent_kn);
+ if (!parent) {
+ ret = -ENODEV;
+ goto out_unlock;
+ }
+
+ ret = closid_alloc();
+ if (ret < 0)
+ goto out_unlock;
+ closid = ret;
+
+ /* allocate the rdtgroup. */
+ rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
+ if (!rdtgrp) {
+ ret = -ENOSPC;
+ goto out_closid_free;
+ }
+ rdtgrp->closid = closid;
+ list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
+
+ /* kernfs creates the directory for rdtgrp */
+ kn = kernfs_create_dir(parent->kn, name, mode, rdtgrp);
+ if (IS_ERR(kn)) {
+ ret = PTR_ERR(kn);
+ goto out_cancel_ref;
+ }
+ rdtgrp->kn = kn;
+
+ /*
+ * kernfs_remove() will drop the reference count on "kn" which
+ * will free it. But we still need it to stick around for the
+ * rdtgroup_kn_unlock(kn) call below. Take one extra reference
+ * here, which will be dropped inside rdtgroup_kn_unlock().
+ */
+ kernfs_get(kn);
+
+ ret = rdtgroup_kn_set_ugid(kn);
+ if (ret)
+ goto out_destroy;
+
+ ret = rdtgroup_add_files(kn, rdtgroup_base_files,
+ ARRAY_SIZE(rdtgroup_base_files));
+ if (ret)
+ goto out_destroy;
+
+ kernfs_activate(kn);
+
+ ret = 0;
+ goto out_unlock;
+
+out_destroy:
+ kernfs_remove(rdtgrp->kn);
+out_cancel_ref:
+ list_del(&rdtgrp->rdtgroup_list);
+ kfree(rdtgrp);
+out_closid_free:
+ closid_free(closid);
+out_unlock:
+ rdtgroup_kn_unlock(parent_kn);
+ return ret;
+}
+
+static int rdtgroup_rmdir(struct kernfs_node *kn)
+{
+ int ret, cpu, closid = rdtgroup_default.closid;
+ struct rdtgroup *rdtgrp;
+ cpumask_var_t tmpmask;
+
+ if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+ return -ENOMEM;
+
+ rdtgrp = rdtgroup_kn_lock_live(kn);
+ if (!rdtgrp) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* Give any tasks back to the default group */
+ rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
+
+ /* Give any CPUs back to the default group */
+ cpumask_or(&rdtgroup_default.cpu_mask,
+ &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
+
+ /* Update per cpu closid of the moved CPUs first */
+ for_each_cpu(cpu, &rdtgrp->cpu_mask)
+ per_cpu(cpu_closid, cpu) = closid;
+ /*
+ * Update the MSR on moved CPUs and CPUs which have moved
+ * task running on them.
+ */
+ cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
+ rdt_update_closid(tmpmask, NULL);
+
+ rdtgrp->flags = RDT_DELETED;
+ closid_free(rdtgrp->closid);
+ list_del(&rdtgrp->rdtgroup_list);
+
+ /*
+ * Take one extra hold on this; it will be dropped when we
+ * kfree(rdtgrp) in rdtgroup_kn_unlock().
+ */
+ kernfs_get(kn);
+ kernfs_remove(rdtgrp->kn);
+ ret = 0;
+out:
+ rdtgroup_kn_unlock(kn);
+ free_cpumask_var(tmpmask);
+ return ret;
+}
+
+static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
+ .mkdir = rdtgroup_mkdir,
+ .rmdir = rdtgroup_rmdir,
+};
+
+static int __init rdtgroup_setup_root(void)
+{
+ int ret;
+
+ rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
+ KERNFS_ROOT_CREATE_DEACTIVATED,
+ &rdtgroup_default);
+ if (IS_ERR(rdt_root))
+ return PTR_ERR(rdt_root);
+
+ mutex_lock(&rdtgroup_mutex);
+
+ rdtgroup_default.closid = 0;
+ list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
+
+ ret = rdtgroup_add_files(rdt_root->kn, rdtgroup_base_files,
+ ARRAY_SIZE(rdtgroup_base_files));
+ if (ret) {
+ kernfs_destroy_root(rdt_root);
+ goto out;
+ }
+
+ rdtgroup_default.kn = rdt_root->kn;
+ kernfs_activate(rdtgroup_default.kn);
+
+out:
+ mutex_unlock(&rdtgroup_mutex);
+
+ return ret;
+}
+
+/*
+ * rdtgroup_init - rdtgroup initialization
+ *
+ * Setup resctrl file system including set up root, create mount point,
+ * register rdtgroup filesystem, and initialize files under root directory.
+ *
+ * Return: 0 on success or -errno
+ */
+int __init rdtgroup_init(void)
+{
+ int ret = 0;
+
+ ret = rdtgroup_setup_root();
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_mount_point(fs_kobj, "resctrl");
+ if (ret)
+ goto cleanup_root;
+
+ ret = register_filesystem(&rdt_fs_type);
+ if (ret)
+ goto cleanup_mountpoint;
+
+ return 0;
+
+cleanup_mountpoint:
+ sysfs_remove_mount_point(fs_kobj, "resctrl");
+cleanup_root:
+ kernfs_destroy_root(rdt_root);
+
+ return ret;
+}
diff --git a/arch/x86/kernel/cpu/intel_rdt_schemata.c b/arch/x86/kernel/cpu/intel_rdt_schemata.c
new file mode 100644
index 000000000000..f369cb8db0d5
--- /dev/null
+++ b/arch/x86/kernel/cpu/intel_rdt_schemata.c
@@ -0,0 +1,245 @@
+/*
+ * Resource Director Technology (RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Tony Luck <tony.luck@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernfs.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <asm/intel_rdt.h>
+
+/*
+ * Check whether a cache bit mask is valid. The SDM says:
+ * Please note that all (and only) contiguous '1' combinations
+ * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
+ * Additionally Haswell requires at least two bits set.
+ */
+static bool cbm_validate(unsigned long var, struct rdt_resource *r)
+{
+ unsigned long first_bit, zero_bit;
+
+ if (var == 0 || var > r->max_cbm)
+ return false;
+
+ first_bit = find_first_bit(&var, r->cbm_len);
+ zero_bit = find_next_zero_bit(&var, r->cbm_len, first_bit);
+
+ if (find_next_bit(&var, r->cbm_len, zero_bit) < r->cbm_len)
+ return false;
+
+ if ((zero_bit - first_bit) < r->min_cbm_bits)
+ return false;
+ return true;
+}
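
The same contiguity rule can be expressed without the kernel bitmap helpers. A stand-alone sketch follows; cbm_len and min_cbm_bits below are example values, not values read from CPUID.

#include <stdbool.h>
#include <stdio.h>

static bool cbm_is_contiguous(unsigned long cbm, unsigned int cbm_len,
			      unsigned int min_bits)
{
	unsigned long full = (cbm_len < 64) ? (1UL << cbm_len) - 1 : ~0UL;

	if (cbm == 0 || (cbm & ~full))
		return false;

	/* Shift out trailing zeros; the remaining bits must all be ones. */
	while (!(cbm & 1))
		cbm >>= 1;
	if (cbm & (cbm + 1))
		return false;

	return __builtin_popcountl(cbm) >= min_bits;
}

int main(void)
{
	printf("%d %d %d\n",
	       cbm_is_contiguous(0x00ff, 20, 2),	/* valid */
	       cbm_is_contiguous(0x0f0f, 20, 2),	/* not contiguous */
	       cbm_is_contiguous(0x0001, 20, 2));	/* too few bits */
	return 0;
}
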
+
+/*
+ * Read one cache bit mask (hex). Check that it is valid for the current
+ * resource type.
+ */
+static int parse_cbm(char *buf, struct rdt_resource *r)
+{
+ unsigned long data;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &data);
+ if (ret)
+ return ret;
+ if (!cbm_validate(data, r))
+ return -EINVAL;
+ r->tmp_cbms[r->num_tmp_cbms++] = data;
+
+ return 0;
+}
+
+/*
+ * For each domain in this resource we expect to find a series of:
+ * id=mask
+ * separated by ";". The "id" is in decimal, and must appear in the
+ * right order.
+ */
+static int parse_line(char *line, struct rdt_resource *r)
+{
+ char *dom = NULL, *id;
+ struct rdt_domain *d;
+ unsigned long dom_id;
+
+ list_for_each_entry(d, &r->domains, list) {
+ dom = strsep(&line, ";");
+ if (!dom)
+ return -EINVAL;
+ id = strsep(&dom, "=");
+ if (kstrtoul(id, 10, &dom_id) || dom_id != d->id)
+ return -EINVAL;
+ if (parse_cbm(dom, r))
+ return -EINVAL;
+ }
+
+ /* Any garbage at the end of the line? */
+ if (line && line[0])
+ return -EINVAL;
+ return 0;
+}
+
+static int update_domains(struct rdt_resource *r, int closid)
+{
+ struct msr_param msr_param;
+ cpumask_var_t cpu_mask;
+ struct rdt_domain *d;
+ int cpu, idx = 0;
+
+ if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ msr_param.low = closid;
+ msr_param.high = msr_param.low + 1;
+ msr_param.res = r;
+
+ list_for_each_entry(d, &r->domains, list) {
+ cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
+ d->cbm[msr_param.low] = r->tmp_cbms[idx++];
+ }
+ cpu = get_cpu();
+ /* Update CBM on this cpu if it's in cpu_mask. */
+ if (cpumask_test_cpu(cpu, cpu_mask))
+ rdt_cbm_update(&msr_param);
+ /* Update CBM on other cpus. */
+ smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
+ put_cpu();
+
+ free_cpumask_var(cpu_mask);
+
+ return 0;
+}
+
+ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
+{
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+ char *tok, *resname;
+ int closid, ret = 0;
+ u32 *l3_cbms = NULL;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+ buf[nbytes - 1] = '\0';
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (!rdtgrp) {
+ rdtgroup_kn_unlock(of->kn);
+ return -ENOENT;
+ }
+
+ closid = rdtgrp->closid;
+
+ /* get scratch space to save all the masks while we validate input */
+ for_each_enabled_rdt_resource(r) {
+ r->tmp_cbms = kcalloc(r->num_domains, sizeof(*l3_cbms),
+ GFP_KERNEL);
+ if (!r->tmp_cbms) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ r->num_tmp_cbms = 0;
+ }
+
+ while ((tok = strsep(&buf, "\n")) != NULL) {
+ resname = strsep(&tok, ":");
+ if (!tok) {
+ ret = -EINVAL;
+ goto out;
+ }
+ for_each_enabled_rdt_resource(r) {
+ if (!strcmp(resname, r->name) &&
+ closid < r->num_closid) {
+ ret = parse_line(tok, r);
+ if (ret)
+ goto out;
+ break;
+ }
+ }
+ if (!r->name) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* Did the parser find all the masks we need? */
+ for_each_enabled_rdt_resource(r) {
+ if (r->num_tmp_cbms != r->num_domains) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ for_each_enabled_rdt_resource(r) {
+ ret = update_domains(r, closid);
+ if (ret)
+ goto out;
+ }
+
+out:
+ rdtgroup_kn_unlock(of->kn);
+ for_each_enabled_rdt_resource(r) {
+ kfree(r->tmp_cbms);
+ r->tmp_cbms = NULL;
+ }
+ return ret ?: nbytes;
+}
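
From user space a schemata update is just a write of one "resource:id=mask;id=mask" line in the format parse_line() expects. A sketch, with a made-up group name and example masks for two L3 cache domains:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/resctrl/grp1/schemata", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Domain 0 gets the upper bits, domain 1 the lower bits. */
	fprintf(f, "L3:0=ffff0;1=000ff\n");
	if (fclose(f) != 0) {
		perror("schemata write");
		return 1;
	}
	return 0;
}
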
+
+static void show_doms(struct seq_file *s, struct rdt_resource *r, int closid)
+{
+ struct rdt_domain *dom;
+ bool sep = false;
+
+ seq_printf(s, "%s:", r->name);
+ list_for_each_entry(dom, &r->domains, list) {
+ if (sep)
+ seq_puts(s, ";");
+ seq_printf(s, "%d=%x", dom->id, dom->cbm[closid]);
+ sep = true;
+ }
+ seq_puts(s, "\n");
+}
+
+int rdtgroup_schemata_show(struct kernfs_open_file *of,
+ struct seq_file *s, void *v)
+{
+ struct rdtgroup *rdtgrp;
+ struct rdt_resource *r;
+ int closid, ret = 0;
+
+ rdtgrp = rdtgroup_kn_lock_live(of->kn);
+ if (rdtgrp) {
+ closid = rdtgrp->closid;
+ for_each_enabled_rdt_resource(r) {
+ if (closid < r->num_closid)
+ show_doms(s, r, closid);
+ }
+ } else {
+ ret = -ENOENT;
+ }
+ rdtgroup_kn_unlock(of->kn);
+ return ret;
+}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 631356c8cca4..c7efbcfbeda6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -311,7 +311,7 @@ static int mce_severity_intel(struct mce *m, int tolerant, char **msg, bool is_e
*msg = s->msg;
s->covered = 1;
if (s->sev >= MCE_UC_SEVERITY && ctx == IN_KERNEL) {
- if (panic_on_oops || tolerant < 1)
+ if (tolerant < 1)
return MCE_PANIC_SEVERITY;
}
return s->sev;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a7fdf453d895..132e1ec67da0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -43,6 +43,7 @@
#include <linux/export.h>
#include <linux/jump_label.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/tlbflush.h>
@@ -135,6 +136,9 @@ void mce_setup(struct mce *m)
m->socketid = cpu_data(m->extcpu).phys_proc_id;
m->apicid = cpu_data(m->extcpu).initial_apicid;
rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
+
+ if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
+ rdmsrl(MSR_PPIN, m->ppin);
}
DEFINE_PER_CPU(struct mce, injectm);
@@ -207,8 +211,12 @@ EXPORT_SYMBOL_GPL(mce_inject_log);
static struct notifier_block mce_srao_nb;
+static atomic_t num_notifiers;
+
void mce_register_decode_chain(struct notifier_block *nb)
{
+ atomic_inc(&num_notifiers);
+
/* Ensure SRAO notifier has the highest priority in the decode chain. */
if (nb != &mce_srao_nb && nb->priority == INT_MAX)
nb->priority -= 1;
@@ -219,6 +227,8 @@ EXPORT_SYMBOL_GPL(mce_register_decode_chain);
void mce_unregister_decode_chain(struct notifier_block *nb)
{
+ atomic_dec(&num_notifiers);
+
atomic_notifier_chain_unregister(&x86_mce_decoder_chain, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_decode_chain);
@@ -270,17 +280,17 @@ struct mca_msr_regs msr_ops = {
.misc = misc_reg
};
-static void print_mce(struct mce *m)
+static void __print_mce(struct mce *m)
{
- int ret = 0;
-
- pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
- m->extcpu, m->mcgstatus, m->bank, m->status);
+ pr_emerg(HW_ERR "CPU %d: Machine Check%s: %Lx Bank %d: %016Lx\n",
+ m->extcpu,
+ (m->mcgstatus & MCG_STATUS_MCIP ? " Exception" : ""),
+ m->mcgstatus, m->bank, m->status);
if (m->ip) {
pr_emerg(HW_ERR "RIP%s %02x:<%016Lx> ",
!(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
- m->cs, m->ip);
+ m->cs, m->ip);
if (m->cs == __KERNEL_CS)
print_symbol("{%s}", m->ip);
@@ -308,6 +318,13 @@ static void print_mce(struct mce *m)
pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
cpu_data(m->extcpu).microcode);
+}
+
+static void print_mce(struct mce *m)
+{
+ int ret = 0;
+
+ __print_mce(m);
/*
* Print out human-readable details about the MCE error,
@@ -569,6 +586,32 @@ static struct notifier_block mce_srao_nb = {
.priority = INT_MAX,
};
+static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct mce *m = (struct mce *)data;
+
+ if (!m)
+ return NOTIFY_DONE;
+
+ /*
+ * Run the default notifier if the SRAO notifier and this one
+ * are the only notifiers registered.
+ */
+ if (atomic_read(&num_notifiers) > 2)
+ return NOTIFY_DONE;
+
+ __print_mce(m);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mce_default_nb = {
+ .notifier_call = mce_default_notifier,
+ /* lowest prio, we want it to run last. */
+ .priority = 0,
+};
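
The fallback above only fires when no other consumer sits on the decode chain. As a rough sketch of how such a consumer (an EDAC driver, say) would hook in — the module and callback names here are invented:

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/mce.h>

static int demo_mce_notify(struct notifier_block *nb, unsigned long val,
			   void *data)
{
	struct mce *m = data;

	if (!m)
		return NOTIFY_DONE;

	pr_info("demo: MCE on CPU %u, bank %d, status 0x%llx\n",
		m->extcpu, m->bank, (unsigned long long)m->status);
	return NOTIFY_OK;
}

static struct notifier_block demo_mce_nb = {
	.notifier_call	= demo_mce_notify,
	.priority	= 1,	/* above the default notifier, below SRAO */
};

static int __init demo_init(void)
{
	mce_register_decode_chain(&demo_mce_nb);
	return 0;
}

static void __exit demo_exit(void)
{
	mce_unregister_decode_chain(&demo_mce_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
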
+
/*
* Read ADDR and MISC registers.
*/
@@ -667,6 +710,15 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
mce_gather_info(&m, NULL);
+ /*
+ * m.tsc was set in mce_setup(). Clear it if not requested.
+ *
+ * FIXME: Propagate @flags to mce_gather_info/mce_setup() to avoid
+ * that dance.
+ */
+ if (!(flags & MCP_TIMESTAMP))
+ m.tsc = 0;
+
for (i = 0; i < mca_cfg.banks; i++) {
if (!mce_banks[i].ctl || !test_bit(i, *b))
continue;
@@ -674,14 +726,12 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
m.misc = 0;
m.addr = 0;
m.bank = i;
- m.tsc = 0;
barrier();
m.status = mce_rdmsrl(msr_ops.status(i));
if (!(m.status & MCI_STATUS_VAL))
continue;
-
/*
* Uncorrected or signalled events are handled by the exception
* handler when it is enabled, so don't process those here.
@@ -696,9 +746,6 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
mce_read_aux(&m, i);
- if (!(flags & MCP_TIMESTAMP))
- m.tsc = 0;
-
severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);
if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
@@ -1355,7 +1402,7 @@ static void mce_timer_fn(unsigned long data)
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
- machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_poll_banks));
+ machine_check_poll(0, this_cpu_ptr(&mce_poll_banks));
if (mce_intel_cmci_poll()) {
iv = mce_adjust_timer(iv);
@@ -1745,6 +1792,14 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
add_timer_on(t, cpu);
}
+static void __mcheck_cpu_setup_timer(void)
+{
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
+ unsigned int cpu = smp_processor_id();
+
+ setup_pinned_timer(t, mce_timer_fn, cpu);
+}
+
static void __mcheck_cpu_init_timer(void)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
@@ -1796,7 +1851,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(c);
__mcheck_cpu_init_clear_banks();
- __mcheck_cpu_init_timer();
+ __mcheck_cpu_setup_timer();
}
/*
@@ -2138,6 +2193,7 @@ int __init mcheck_init(void)
{
mcheck_intel_therm_init();
mce_register_decode_chain(&mce_srao_nb);
+ mce_register_decode_chain(&mce_default_nb);
mcheck_vendor_init_severity();
INIT_WORK(&mce_work, mce_process_work);
@@ -2255,8 +2311,6 @@ static struct bus_type mce_subsys = {
DEFINE_PER_CPU(struct device *, mce_device);
-void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
-
static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
{
return container_of(attr, struct mce_bank, attr);
@@ -2409,6 +2463,10 @@ static int mce_device_create(unsigned int cpu)
if (!mce_available(&boot_cpu_data))
return -EIO;
+ dev = per_cpu(mce_device, cpu);
+ if (dev)
+ return 0;
+
dev = kzalloc(sizeof *dev, GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -2468,28 +2526,25 @@ static void mce_device_remove(unsigned int cpu)
}
/* Make sure there are no machine checks on offlined CPUs. */
-static void mce_disable_cpu(void *h)
+static void mce_disable_cpu(void)
{
- unsigned long action = *(unsigned long *)h;
-
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
- if (!(action & CPU_TASKS_FROZEN))
+ if (!cpuhp_tasks_frozen)
cmci_clear();
vendor_disable_error_reporting();
}
-static void mce_reenable_cpu(void *h)
+static void mce_reenable_cpu(void)
{
- unsigned long action = *(unsigned long *)h;
int i;
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
- if (!(action & CPU_TASKS_FROZEN))
+ if (!cpuhp_tasks_frozen)
cmci_reenable();
for (i = 0; i < mca_cfg.banks; i++) {
struct mce_bank *b = &mce_banks[i];
@@ -2499,45 +2554,43 @@ static void mce_reenable_cpu(void *h)
}
}
-/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
-mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+static int mce_cpu_dead(unsigned int cpu)
+{
+ mce_intel_hcpu_update(cpu);
+
+ /* intentionally ignoring frozen here */
+ if (!cpuhp_tasks_frozen)
+ cmci_rediscover();
+ return 0;
+}
+
+static int mce_cpu_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
struct timer_list *t = &per_cpu(mce_timer, cpu);
+ int ret;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- mce_device_create(cpu);
- if (threshold_cpu_callback)
- threshold_cpu_callback(action, cpu);
- break;
- case CPU_DEAD:
- if (threshold_cpu_callback)
- threshold_cpu_callback(action, cpu);
- mce_device_remove(cpu);
- mce_intel_hcpu_update(cpu);
+ mce_device_create(cpu);
- /* intentionally ignoring frozen here */
- if (!(action & CPU_TASKS_FROZEN))
- cmci_rediscover();
- break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
- del_timer_sync(t);
- break;
- case CPU_DOWN_FAILED:
- smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
- mce_start_timer(cpu, t);
- break;
+ ret = mce_threshold_create_device(cpu);
+ if (ret) {
+ mce_device_remove(cpu);
+ return ret;
}
-
- return NOTIFY_OK;
+ mce_reenable_cpu();
+ mce_start_timer(cpu, t);
+ return 0;
}
-static struct notifier_block mce_cpu_notifier = {
- .notifier_call = mce_cpu_callback,
-};
+static int mce_cpu_pre_down(unsigned int cpu)
+{
+ struct timer_list *t = &per_cpu(mce_timer, cpu);
+
+ mce_disable_cpu();
+ del_timer_sync(t);
+ mce_threshold_remove_device(cpu);
+ mce_device_remove(cpu);
+ return 0;
+}
static __init void mce_init_banks(void)
{
@@ -2559,8 +2612,8 @@ static __init void mce_init_banks(void)
static __init int mcheck_init_device(void)
{
+ enum cpuhp_state hp_online;
int err;
- int i = 0;
if (!mce_available(&boot_cpu_data)) {
err = -EIO;
@@ -2578,23 +2631,16 @@ static __init int mcheck_init_device(void)
if (err)
goto err_out_mem;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- err = mce_device_create(i);
- if (err) {
- /*
- * Register notifier anyway (and do not unreg it) so
- * that we don't leave undeleted timers, see notifier
- * callback above.
- */
- __register_hotcpu_notifier(&mce_cpu_notifier);
- cpu_notifier_register_done();
- goto err_device_create;
- }
- }
+ err = cpuhp_setup_state(CPUHP_X86_MCE_DEAD, "x86/mce:dead", NULL,
+ mce_cpu_dead);
+ if (err)
+ goto err_out_mem;
- __register_hotcpu_notifier(&mce_cpu_notifier);
- cpu_notifier_register_done();
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/mce:online",
+ mce_cpu_online, mce_cpu_pre_down);
+ if (err < 0)
+ goto err_out_online;
+ hp_online = err;
register_syscore_ops(&mce_syscore_ops);
@@ -2607,16 +2653,10 @@ static __init int mcheck_init_device(void)
err_register:
unregister_syscore_ops(&mce_syscore_ops);
+ cpuhp_remove_state(hp_online);
-err_device_create:
- /*
- * We didn't keep track of which devices were created above, but
- * even if we had, the set of online cpus might have changed.
- * Play safe and remove for every possible cpu, since
- * mce_device_remove() will do the right thing.
- */
- for_each_possible_cpu(i)
- mce_device_remove(i);
+err_out_online:
+ cpuhp_remove_state(CPUHP_X86_MCE_DEAD);
err_out_mem:
free_cpumask_var(mce_device_initialized);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9b5403462936..3d97f3eeec2c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -55,6 +55,8 @@
/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF 0xF000
+static bool thresholding_en;
+
static const char * const th_names[] = {
"load_store",
"insn_fetch",
@@ -69,7 +71,12 @@ static const char * const smca_umc_block_names[] = {
"misc_umc"
};
-struct smca_bank_name smca_bank_names[] = {
+struct smca_bank_name {
+ const char *name; /* Short name for sysfs */
+ const char *long_name; /* Long name for pretty-printing */
+};
+
+static struct smca_bank_name smca_names[] = {
[SMCA_LS] = { "load_store", "Load Store Unit" },
[SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
[SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
@@ -84,9 +91,25 @@ struct smca_bank_name smca_bank_names[] = {
[SMCA_PSP] = { "psp", "Platform Security Processor" },
[SMCA_SMU] = { "smu", "System Management Unit" },
};
-EXPORT_SYMBOL_GPL(smca_bank_names);
-static struct smca_hwid_mcatype smca_hwid_mcatypes[] = {
+const char *smca_get_name(enum smca_bank_types t)
+{
+ if (t >= N_SMCA_BANK_TYPES)
+ return NULL;
+
+ return smca_names[t].name;
+}
+
+const char *smca_get_long_name(enum smca_bank_types t)
+{
+ if (t >= N_SMCA_BANK_TYPES)
+ return NULL;
+
+ return smca_names[t].long_name;
+}
+EXPORT_SYMBOL_GPL(smca_get_long_name);
+
+static struct smca_hwid smca_hwid_mcatypes[] = {
/* { bank_type, hwid_mcatype, xec_bitmap } */
/* ZN Core (HWID=0xB0) MCA types */
@@ -116,7 +139,7 @@ static struct smca_hwid_mcatype smca_hwid_mcatypes[] = {
{ SMCA_SMU, HWID_MCATYPE(0x01, 0x0), 0x1 },
};
-struct smca_bank_info smca_banks[MAX_NR_BANKS];
+struct smca_bank smca_banks[MAX_NR_BANKS];
EXPORT_SYMBOL_GPL(smca_banks);
/*
@@ -142,35 +165,34 @@ static void default_deferred_error_interrupt(void)
}
void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
-/*
- * CPU Initialization
- */
-
static void get_smca_bank_info(unsigned int bank)
{
unsigned int i, hwid_mcatype, cpu = smp_processor_id();
- struct smca_hwid_mcatype *type;
- u32 high, instanceId;
- u16 hwid, mcatype;
+ struct smca_hwid *s_hwid;
+ u32 high, instance_id;
/* Collect bank_info using CPU 0 for now. */
if (cpu)
return;
- if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instanceId, &high)) {
+ if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &instance_id, &high)) {
pr_warn("Failed to read MCA_IPID for bank %d\n", bank);
return;
}
- hwid = high & MCI_IPID_HWID;
- mcatype = (high & MCI_IPID_MCATYPE) >> 16;
- hwid_mcatype = HWID_MCATYPE(hwid, mcatype);
+ hwid_mcatype = HWID_MCATYPE(high & MCI_IPID_HWID,
+ (high & MCI_IPID_MCATYPE) >> 16);
for (i = 0; i < ARRAY_SIZE(smca_hwid_mcatypes); i++) {
- type = &smca_hwid_mcatypes[i];
- if (hwid_mcatype == type->hwid_mcatype) {
- smca_banks[bank].type = type;
- smca_banks[bank].type_instance = instanceId;
+ s_hwid = &smca_hwid_mcatypes[i];
+ if (hwid_mcatype == s_hwid->hwid_mcatype) {
+
+ WARN(smca_banks[bank].hwid,
+ "Bank %s already initialized!\n",
+ smca_get_name(s_hwid->bank_type));
+
+ smca_banks[bank].hwid = s_hwid;
+ smca_banks[bank].id = instance_id;
break;
}
}
@@ -533,6 +555,206 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
deferred_error_interrupt_enable(c);
}
+int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
+{
+ u64 dram_base_addr, dram_limit_addr, dram_hole_base;
+ /* We start from the normalized address */
+ u64 ret_addr = norm_addr;
+
+ u32 tmp;
+
+ u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
+ u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
+ u8 intlv_addr_sel, intlv_addr_bit;
+ u8 num_intlv_bits, hashed_bit;
+ u8 lgcy_mmio_hole_en, base = 0;
+ u8 cs_mask, cs_id = 0;
+ bool hash_enabled = false;
+
+ /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
+ if (amd_df_indirect_read(nid, 0, 0x1B4, umc, &tmp))
+ goto out_err;
+
+ /* Remove HiAddrOffset from normalized address, if enabled: */
+ if (tmp & BIT(0)) {
+ u64 hi_addr_offset = (tmp & GENMASK_ULL(31, 20)) << 8;
+
+ if (norm_addr >= hi_addr_offset) {
+ ret_addr -= hi_addr_offset;
+ base = 1;
+ }
+ }
+
+ /* Read D18F0x110 (DramBaseAddress). */
+ if (amd_df_indirect_read(nid, 0, 0x110 + (8 * base), umc, &tmp))
+ goto out_err;
+
+ /* Check if address range is valid. */
+ if (!(tmp & BIT(0))) {
+ pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
+ __func__, tmp);
+ goto out_err;
+ }
+
+ lgcy_mmio_hole_en = tmp & BIT(1);
+ intlv_num_chan = (tmp >> 4) & 0xF;
+ intlv_addr_sel = (tmp >> 8) & 0x7;
+ dram_base_addr = (tmp & GENMASK_ULL(31, 12)) << 16;
+
+ /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
+ if (intlv_addr_sel > 3) {
+ pr_err("%s: Invalid interleave address select %d.\n",
+ __func__, intlv_addr_sel);
+ goto out_err;
+ }
+
+ /* Read D18F0x114 (DramLimitAddress). */
+ if (amd_df_indirect_read(nid, 0, 0x114 + (8 * base), umc, &tmp))
+ goto out_err;
+
+ intlv_num_sockets = (tmp >> 8) & 0x1;
+ intlv_num_dies = (tmp >> 10) & 0x3;
+ dram_limit_addr = ((tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
+
+ intlv_addr_bit = intlv_addr_sel + 8;
+
+ /* Re-use intlv_num_chan by setting it equal to log2(#channels) */
+ switch (intlv_num_chan) {
+ case 0: intlv_num_chan = 0; break;
+ case 1: intlv_num_chan = 1; break;
+ case 3: intlv_num_chan = 2; break;
+ case 5: intlv_num_chan = 3; break;
+ case 7: intlv_num_chan = 4; break;
+
+ case 8: intlv_num_chan = 1;
+ hash_enabled = true;
+ break;
+ default:
+ pr_err("%s: Invalid number of interleaved channels %d.\n",
+ __func__, intlv_num_chan);
+ goto out_err;
+ }
+
+ num_intlv_bits = intlv_num_chan;
+
+ if (intlv_num_dies > 2) {
+ pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
+ __func__, intlv_num_dies);
+ goto out_err;
+ }
+
+ num_intlv_bits += intlv_num_dies;
+
+ /* Add a bit if sockets are interleaved. */
+ num_intlv_bits += intlv_num_sockets;
+
+ /* Assert num_intlv_bits <= 4 */
+ if (num_intlv_bits > 4) {
+ pr_err("%s: Invalid interleave bits %d.\n",
+ __func__, num_intlv_bits);
+ goto out_err;
+ }
+
+ if (num_intlv_bits > 0) {
+ u64 temp_addr_x, temp_addr_i, temp_addr_y;
+ u8 die_id_bit, sock_id_bit, cs_fabric_id;
+
+ /*
+ * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
+ * This is the fabric id for this coherent slave. Use
+ * umc/channel# as instance id of the coherent slave
+ * for FICAA.
+ */
+ if (amd_df_indirect_read(nid, 0, 0x50, umc, &tmp))
+ goto out_err;
+
+ cs_fabric_id = (tmp >> 8) & 0xFF;
+ die_id_bit = 0;
+
+ /* If interleaved over more than 1 channel: */
+ if (intlv_num_chan) {
+ die_id_bit = intlv_num_chan;
+ cs_mask = (1 << die_id_bit) - 1;
+ cs_id = cs_fabric_id & cs_mask;
+ }
+
+ sock_id_bit = die_id_bit;
+
+ /* Read D18F1x208 (SystemFabricIdMask). */
+ if (intlv_num_dies || intlv_num_sockets)
+ if (amd_df_indirect_read(nid, 1, 0x208, umc, &tmp))
+ goto out_err;
+
+ /* If interleaved over more than 1 die. */
+ if (intlv_num_dies) {
+ sock_id_bit = die_id_bit + intlv_num_dies;
+ die_id_shift = (tmp >> 24) & 0xF;
+ die_id_mask = (tmp >> 8) & 0xFF;
+
+ cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
+ }
+
+ /* If interleaved over more than 1 socket. */
+ if (intlv_num_sockets) {
+ socket_id_shift = (tmp >> 28) & 0xF;
+ socket_id_mask = (tmp >> 16) & 0xFF;
+
+ cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
+ }
+
+ /*
+ * The pre-interleaved address consists of XXXXXXIIIYYYYY
+ * where III is the ID for this CS, and XXXXXXYYYYY are the
+ * address bits from the post-interleaved address.
+ * "num_intlv_bits" has been calculated to tell us how many "I"
+ * bits there are. "intlv_addr_bit" tells us how many "Y" bits
+ * there are (where "I" starts).
+ */
+ temp_addr_y = ret_addr & GENMASK_ULL(intlv_addr_bit-1, 0);
+ temp_addr_i = (cs_id << intlv_addr_bit);
+ temp_addr_x = (ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
+ ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
+ }
+
+ /* Add dram base address */
+ ret_addr += dram_base_addr;
+
+ /* If legacy MMIO hole enabled */
+ if (lgcy_mmio_hole_en) {
+ if (amd_df_indirect_read(nid, 0, 0x104, umc, &tmp))
+ goto out_err;
+
+ dram_hole_base = tmp & GENMASK(31, 24);
+ if (ret_addr >= dram_hole_base)
+ ret_addr += (BIT_ULL(32) - dram_hole_base);
+ }
+
+ if (hash_enabled) {
+ /* Save some parentheses and grab ls-bit at the end. */
+ hashed_bit = (ret_addr >> 12) ^
+ (ret_addr >> 18) ^
+ (ret_addr >> 21) ^
+ (ret_addr >> 30) ^
+ cs_id;
+
+ hashed_bit &= BIT(0);
+
+ if (hashed_bit != ((ret_addr >> intlv_addr_bit) & BIT(0)))
+ ret_addr ^= BIT(intlv_addr_bit);
+ }
+
+ /* Is the calculated system address above the DRAM limit address? */
+ if (ret_addr > dram_limit_addr)
+ goto out_err;
+
+ *sys_addr = ret_addr;
+ return 0;
+
+out_err:
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(umc_normaddr_to_sysaddr);
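
The re-assembly step in the middle of this function (the XXXXXXIIIYYYYY comment) is just a bit splice. A toy user-space illustration with arbitrary example values, not real fabric register contents:

#include <stdint.h>
#include <stdio.h>

static uint64_t splice_intlv(uint64_t addr, unsigned int intlv_addr_bit,
			     unsigned int num_intlv_bits, uint64_t cs_id)
{
	uint64_t lo = addr & ((1ULL << intlv_addr_bit) - 1);	/* YYYYY  */
	uint64_t hi = (addr >> intlv_addr_bit) <<
		      (intlv_addr_bit + num_intlv_bits);	/* XXXXXX */

	return hi | (cs_id << intlv_addr_bit) | lo;		/* X..I..Y */
}

int main(void)
{
	/* 2-way channel interleave selected by address bit 8, cs_id = 1 */
	printf("0x%llx\n",
	       (unsigned long long)splice_intlv(0x12345, 8, 1, 1));
	return 0;
}
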
+
static void
__log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
{
@@ -645,6 +867,7 @@ static void amd_threshold_interrupt(void)
{
u32 low = 0, high = 0, address = 0;
unsigned int bank, block, cpu = smp_processor_id();
+ struct thresh_restart tr;
/* assume first bank caused it */
for (bank = 0; bank < mca_cfg.banks; ++bank) {
@@ -681,6 +904,11 @@ static void amd_threshold_interrupt(void)
log:
__log_error(bank, false, true, ((u64)high << 32) | low);
+
+ /* Reset threshold block after logging error. */
+ memset(&tr, 0, sizeof(tr));
+ tr.b = &per_cpu(threshold_banks, cpu)[bank]->blocks[block];
+ threshold_restart_bank(&tr);
}
/*
@@ -826,10 +1054,10 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
return th_names[bank];
}
- if (!smca_banks[bank].type)
+ if (!smca_banks[bank].hwid)
return NULL;
- bank_type = smca_banks[bank].type->bank_type;
+ bank_type = smca_banks[bank].hwid->bank_type;
if (b && bank_type == SMCA_UMC) {
if (b->block < ARRAY_SIZE(smca_umc_block_names))
@@ -838,8 +1066,8 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
}
snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
- "%s_%x", smca_bank_names[bank_type].name,
- smca_banks[bank].type_instance);
+ "%s_%x", smca_get_name(bank_type),
+ smca_banks[bank].id);
return buf_mcatype;
}
@@ -1010,31 +1238,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
return err;
}
-/* create dir/files for all valid threshold banks */
-static int threshold_create_device(unsigned int cpu)
-{
- unsigned int bank;
- struct threshold_bank **bp;
- int err = 0;
-
- bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
- GFP_KERNEL);
- if (!bp)
- return -ENOMEM;
-
- per_cpu(threshold_banks, cpu) = bp;
-
- for (bank = 0; bank < mca_cfg.banks; ++bank) {
- if (!(per_cpu(bank_map, cpu) & (1 << bank)))
- continue;
- err = threshold_create_bank(cpu, bank);
- if (err)
- return err;
- }
-
- return err;
-}
-
static void deallocate_threshold_block(unsigned int cpu,
unsigned int bank)
{
@@ -1102,48 +1305,71 @@ free_out:
per_cpu(threshold_banks, cpu)[bank] = NULL;
}
-static void threshold_remove_device(unsigned int cpu)
+int mce_threshold_remove_device(unsigned int cpu)
{
unsigned int bank;
+ if (!thresholding_en)
+ return 0;
+
for (bank = 0; bank < mca_cfg.banks; ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue;
threshold_remove_bank(cpu, bank);
}
kfree(per_cpu(threshold_banks, cpu));
+ per_cpu(threshold_banks, cpu) = NULL;
+ return 0;
}
-/* get notified when a cpu comes on/off */
-static void
-amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
+/* create dir/files for all valid threshold banks */
+int mce_threshold_create_device(unsigned int cpu)
{
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- threshold_create_device(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- threshold_remove_device(cpu);
- break;
- default:
- break;
+ unsigned int bank;
+ struct threshold_bank **bp;
+ int err = 0;
+
+ if (!thresholding_en)
+ return 0;
+
+ bp = per_cpu(threshold_banks, cpu);
+ if (bp)
+ return 0;
+
+ bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
+ GFP_KERNEL);
+ if (!bp)
+ return -ENOMEM;
+
+ per_cpu(threshold_banks, cpu) = bp;
+
+ for (bank = 0; bank < mca_cfg.banks; ++bank) {
+ if (!(per_cpu(bank_map, cpu) & (1 << bank)))
+ continue;
+ err = threshold_create_bank(cpu, bank);
+ if (err)
+ goto err;
}
+ return err;
+err:
+ mce_threshold_remove_device(cpu);
+ return err;
}
static __init int threshold_init_device(void)
{
unsigned lcpu = 0;
+ if (mce_threshold_vector == amd_threshold_interrupt)
+ thresholding_en = true;
+
/* to hit CPUs online before the notifier is up */
for_each_online_cpu(lcpu) {
- int err = threshold_create_device(lcpu);
+ int err = mce_threshold_create_device(lcpu);
if (err)
return err;
}
- threshold_cpu_callback = amd_64_threshold_cpu_callback;
return 0;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 1defb8ea882c..190b3e6cef4d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -11,6 +11,8 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
+#include <asm/cpufeature.h>
+#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>
@@ -130,7 +132,7 @@ bool mce_intel_cmci_poll(void)
* Reset the counter if we've logged an error in the last poll
* during the storm.
*/
- if (machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned)))
+ if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)))
this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
else
this_cpu_dec(cmci_backoff_cnt);
@@ -342,7 +344,7 @@ void cmci_recheck(void)
return;
local_irq_save(flags);
- machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
+ machine_check_poll(0, this_cpu_ptr(&mce_banks_owned));
local_irq_restore(flags);
}
@@ -464,11 +466,46 @@ static void intel_clear_lmce(void)
wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
}
+static void intel_ppin_init(struct cpuinfo_x86 *c)
+{
+ unsigned long long val;
+
+ /*
+ * Even if testing the presence of the MSR would be enough, we don't
+ * want to risk the situation where other models reuse this MSR for
+ * other purposes.
+ */
+ switch (c->x86_model) {
+ case INTEL_FAM6_IVYBRIDGE_X:
+ case INTEL_FAM6_HASWELL_X:
+ case INTEL_FAM6_BROADWELL_XEON_D:
+ case INTEL_FAM6_BROADWELL_X:
+ case INTEL_FAM6_SKYLAKE_X:
+ if (rdmsrl_safe(MSR_PPIN_CTL, &val))
+ return;
+
+ if ((val & 3UL) == 1UL) {
+ /* PPIN available but disabled: */
+ return;
+ }
+
+ /* If PPIN is disabled, but not locked, try to enable: */
+ if (!(val & 3UL)) {
+ wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
+ rdmsrl_safe(MSR_PPIN_CTL, &val);
+ }
+
+ if ((val & 3UL) == 2UL)
+ set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
+ }
+}
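
Reading the checks above, the low two bits of MSR_PPIN_CTL encode lock and enable state. A small sketch of that interpretation (inferred from the code, not quoted from the SDM):

#include <stdio.h>

/* Interpretation inferred from intel_ppin_init() above. */
static const char *ppin_ctl_state(unsigned long long val)
{
	switch (val & 3) {
	case 0: return "disabled, not locked (kernel may try to enable)";
	case 1: return "disabled and locked";
	case 2: return "enabled";
	default: return "enabled, lock bit also set";
	}
}

int main(void)
{
	unsigned long long v;

	for (v = 0; v < 4; v++)
		printf("PPIN_CTL & 3 == %llu: %s\n", v, ppin_ctl_state(v));
	return 0;
}
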
+
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
intel_init_thermal(c);
intel_init_cmci();
intel_init_lmce();
+ intel_ppin_init(c);
}
void mce_intel_feature_clear(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 6b9dc4d18ccc..e1d74fd79d5f 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -271,58 +271,32 @@ static void thermal_throttle_remove_dev(struct device *dev)
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
-thermal_throttle_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int thermal_throttle_online(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- struct device *dev;
- int err = 0;
-
- dev = get_cpu_device(cpu);
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- err = thermal_throttle_add_dev(dev, cpu);
- WARN_ON(err);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- thermal_throttle_remove_dev(dev);
- break;
- }
- return notifier_from_errno(err);
+ struct device *dev = get_cpu_device(cpu);
+
+ return thermal_throttle_add_dev(dev, cpu);
}
-static struct notifier_block thermal_throttle_cpu_notifier =
+static int thermal_throttle_offline(unsigned int cpu)
{
- .notifier_call = thermal_throttle_cpu_callback,
-};
+ struct device *dev = get_cpu_device(cpu);
+
+ thermal_throttle_remove_dev(dev);
+ return 0;
+}
static __init int thermal_throttle_init_device(void)
{
- unsigned int cpu = 0;
- int err;
+ int ret;
if (!atomic_read(&therm_throt_en))
return 0;
- cpu_notifier_register_begin();
-
- /* connect live CPUs to sysfs */
- for_each_online_cpu(cpu) {
- err = thermal_throttle_add_dev(get_cpu_device(cpu), cpu);
- WARN_ON(err);
- }
-
- __register_hotcpu_notifier(&thermal_throttle_cpu_notifier);
- cpu_notifier_register_done();
-
- return 0;
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/therm:online",
+ thermal_throttle_online,
+ thermal_throttle_offline);
+ return ret < 0 ? ret : 0;
}
device_initcall(thermal_throttle_init_device);
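
The cpuhp_setup_state() pattern used in this conversion, sketched as a minimal stand-alone module (all names invented): the dynamic CPUHP_AP_ONLINE_DYN range returns the allocated state number, which is what cpuhp_remove_state() needs on teardown.

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state demo_state;

static int demo_online(unsigned int cpu)
{
	pr_info("demo: cpu %u online\n", cpu);
	return 0;
}

static int demo_offline(unsigned int cpu)
{
	pr_info("demo: cpu %u going down\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	/* Callbacks run on each CPU as it comes up / goes down. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_online, demo_offline);
	if (ret < 0)
		return ret;
	demo_state = ret;
	return 0;
}

static void __exit demo_exit(void)
{
	cpuhp_remove_state(demo_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
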
diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile
index 220b1a508513..ba12e8aa4a45 100644
--- a/arch/x86/kernel/cpu/microcode/Makefile
+++ b/arch/x86/kernel/cpu/microcode/Makefile
@@ -1,4 +1,4 @@
microcode-y := core.o
obj-$(CONFIG_MICROCODE) += microcode.o
-microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o
+microcode-$(CONFIG_MICROCODE_INTEL) += intel.o
microcode-$(CONFIG_MICROCODE_AMD) += amd.o
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 017bda12caae..6f353bdb3a25 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -5,6 +5,7 @@
* CPUs and later.
*
* Copyright (C) 2008-2011 Advanced Micro Devices Inc.
+ * 2013-2016 Borislav Petkov <bp@alien8.de>
*
* Author: Peter Oruba <peter.oruba@amd.com>
*
@@ -39,64 +40,25 @@
static struct equiv_cpu_entry *equiv_cpu_table;
-struct ucode_patch {
- struct list_head plist;
- void *data;
- u32 patch_id;
- u16 equiv_cpu;
-};
-
-static LIST_HEAD(pcache);
-
/*
* This points to the current valid container of microcode patches which we will
- * save from the initrd before jettisoning its contents.
+ * save from the initrd/builtin before jettisoning its contents.
*/
-static u8 *container;
-static size_t container_size;
-static bool ucode_builtin;
+struct container {
+ u8 *data;
+ size_t size;
+} cont;
static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;
-static struct cpio_data ucode_cpio;
-
-static struct cpio_data __init find_ucode_in_initrd(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- char *path;
- void *start;
- size_t size;
-
- /*
- * Microcode patch container file is prepended to the initrd in cpio
- * format. See Documentation/x86/early-microcode.txt
- */
- static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
-
-#ifdef CONFIG_X86_32
- struct boot_params *p;
-
- /*
- * On 32-bit, early load occurs before paging is turned on so we need
- * to use physical addresses.
- */
- p = (struct boot_params *)__pa_nodebug(&boot_params);
- path = (char *)__pa_nodebug(ucode_path);
- start = (void *)p->hdr.ramdisk_image;
- size = p->hdr.ramdisk_size;
-#else
- path = ucode_path;
- start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
- size = boot_params.hdr.ramdisk_size;
-#endif /* !CONFIG_X86_32 */
-
- return find_cpio_data(path, start, size, NULL);
-#else
- return (struct cpio_data){ NULL, 0, "" };
-#endif
-}
+/*
+ * Microcode patch container file is prepended to the initrd in cpio
+ * format. See Documentation/x86/early-microcode.txt
+ */
+static const char
+ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
static size_t compute_container_size(u8 *data, u32 total_size)
{
@@ -135,48 +97,48 @@ static size_t compute_container_size(u8 *data, u32 total_size)
return size;
}
+static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
+ unsigned int sig)
+{
+ int i = 0;
+
+ if (!equiv_cpu_table)
+ return 0;
+
+ while (equiv_cpu_table[i].installed_cpu != 0) {
+ if (sig == equiv_cpu_table[i].installed_cpu)
+ return equiv_cpu_table[i].equiv_cpu;
+
+ i++;
+ }
+ return 0;
+}
+
/*
- * Early load occurs before we can vmalloc(). So we look for the microcode
- * patch container file in initrd, traverse equivalent cpu table, look for a
- * matching microcode patch, and update, all in initrd memory in place.
- * When vmalloc() is available for use later -- on 64-bit during first AP load,
- * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
- * load_microcode_amd() to save equivalent cpu table and microcode patches in
- * kernel heap memory.
+ * This scans the ucode blob for the proper container as we can have multiple
+ * containers glued together.
*/
-static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
+static struct container
+find_proper_container(u8 *ucode, size_t size, u16 *ret_id)
{
+ struct container ret = { NULL, 0 };
+ u32 eax, ebx, ecx, edx;
struct equiv_cpu_entry *eq;
- size_t *cont_sz;
- u32 *header;
- u8 *data, **cont;
- u8 (*patch)[PATCH_MAX_SIZE];
- u16 eq_id = 0;
int offset, left;
- u32 rev, eax, ebx, ecx, edx;
- u32 *new_rev;
-
-#ifdef CONFIG_X86_32
- new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
- cont_sz = (size_t *)__pa_nodebug(&container_size);
- cont = (u8 **)__pa_nodebug(&container);
- patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
-#else
- new_rev = &ucode_new_rev;
- cont_sz = &container_size;
- cont = &container;
- patch = &amd_ucode_patch;
-#endif
+ u16 eq_id = 0;
+ u32 *header;
+ u8 *data;
data = ucode;
left = size;
header = (u32 *)data;
+
/* find equiv cpu table */
if (header[0] != UCODE_MAGIC ||
header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
header[2] == 0) /* size */
- return;
+ return ret;
eax = 0x00000001;
ecx = 0;
@@ -185,7 +147,7 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
while (left > 0) {
eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
- *cont = data;
+ ret.data = data;
/* Advance past the container header */
offset = header[2] + CONTAINER_HDR_SZ;
@@ -194,15 +156,15 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
eq_id = find_equiv_id(eq, eax);
if (eq_id) {
- this_equiv_id = eq_id;
- *cont_sz = compute_container_size(*cont, left + offset);
+ ret.size = compute_container_size(ret.data, left + offset);
/*
* truncate how much we need to iterate over in the
* ucode update loop below
*/
- left = *cont_sz - offset;
- break;
+ left = ret.size - offset;
+ *ret_id = eq_id;
+ return ret;
}
/*
@@ -212,6 +174,7 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
*/
while (left > 0) {
header = (u32 *)data;
+
if (header[0] == UCODE_MAGIC &&
header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
break;
@@ -226,14 +189,64 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
ucode = data;
}
- if (!eq_id) {
- *cont = NULL;
- *cont_sz = 0;
- return;
- }
+ return ret;
+}
+
+static int __apply_microcode_amd(struct microcode_amd *mc_amd)
+{
+ u32 rev, dummy;
+
+ native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
+
+ /* verify patch application was successful */
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ if (rev != mc_amd->hdr.patch_id)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Early load occurs before we can vmalloc(). So we look for the microcode
+ * patch container file in initrd, traverse equivalent cpu table, look for a
+ * matching microcode patch, and update, all in initrd memory in place.
+ * When vmalloc() is available for use later -- on 64-bit during first AP load,
+ * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
+ * load_microcode_amd() to save equivalent cpu table and microcode patches in
+ * kernel heap memory.
+ */
+static struct container
+apply_microcode_early_amd(void *ucode, size_t size, bool save_patch)
+{
+ struct container ret = { NULL, 0 };
+ u8 (*patch)[PATCH_MAX_SIZE];
+ int offset, left;
+ u32 rev, *header;
+ u8 *data;
+ u16 eq_id = 0;
+ u32 *new_rev;
+
+#ifdef CONFIG_X86_32
+ new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
+ patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
+#else
+ new_rev = &ucode_new_rev;
+ patch = &amd_ucode_patch;
+#endif
if (check_current_patch_level(&rev, true))
- return;
+ return (struct container){ NULL, 0 };
+
+ ret = find_proper_container(ucode, size, &eq_id);
+ if (!eq_id)
+ return (struct container){ NULL, 0 };
+
+ this_equiv_id = eq_id;
+ header = (u32 *)ret.data;
+
+ /* We're pointing to an equiv table, skip over it. */
+ data = ret.data + header[2] + CONTAINER_HDR_SZ;
+ left = ret.size - (header[2] + CONTAINER_HDR_SZ);
while (left > 0) {
struct microcode_amd *mc;
@@ -252,8 +265,7 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
*new_rev = rev;
if (save_patch)
- memcpy(patch, mc,
- min_t(u32, header[1], PATCH_MAX_SIZE));
+ memcpy(patch, mc, min_t(u32, header[1], PATCH_MAX_SIZE));
}
}
@@ -261,10 +273,10 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
data += offset;
left -= offset;
}
+ return ret;
}
-static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
- unsigned int family)
+static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
{
#ifdef CONFIG_X86_64
char fw_name[36] = "amd-ucode/microcode_amd.bin";
@@ -281,47 +293,45 @@ static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
void __init load_ucode_amd_bsp(unsigned int family)
{
+ struct ucode_cpu_info *uci;
struct cpio_data cp;
- bool *builtin;
- void **data;
- size_t *size;
+ const char *path;
+ bool use_pa;
-#ifdef CONFIG_X86_32
- data = (void **)__pa_nodebug(&ucode_cpio.data);
- size = (size_t *)__pa_nodebug(&ucode_cpio.size);
- builtin = (bool *)__pa_nodebug(&ucode_builtin);
-#else
- data = &ucode_cpio.data;
- size = &ucode_cpio.size;
- builtin = &ucode_builtin;
-#endif
+ if (IS_ENABLED(CONFIG_X86_32)) {
+ uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
+ path = (const char *)__pa_nodebug(ucode_path);
+ use_pa = true;
+ } else {
+ uci = ucode_cpu_info;
+ path = ucode_path;
+ use_pa = false;
+ }
- *builtin = load_builtin_amd_microcode(&cp, family);
- if (!*builtin)
- cp = find_ucode_in_initrd();
+ if (!get_builtin_microcode(&cp, family))
+ cp = find_microcode_in_initrd(path, use_pa);
if (!(cp.data && cp.size))
return;
- *data = cp.data;
- *size = cp.size;
+ /* Get BSP's CPUID.EAX(1), needed in load_microcode_amd() */
+ uci->cpu_sig.sig = cpuid_eax(1);
- apply_ucode_in_initrd(cp.data, cp.size, true);
+ apply_microcode_early_amd(cp.data, cp.size, true);
}
#ifdef CONFIG_X86_32
/*
* On 32-bit, since AP's early load occurs before paging is turned on, we
- * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
- * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
- * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
+ * cannot traverse cpu_equiv_table and microcode_cache in kernel heap memory.
+ * So during cold boot, each AP will call apply_microcode_early_amd() just
+ * like the BSP does. In save_microcode_in_initrd_amd(), the BSP's patch is
+ * copied to amd_ucode_patch,
* which is used upon resume from suspend.
*/
-void load_ucode_amd_ap(void)
+void load_ucode_amd_ap(unsigned int family)
{
struct microcode_amd *mc;
- size_t *usize;
- void **ucode;
+ struct cpio_data cp;
mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
@@ -329,60 +339,63 @@ void load_ucode_amd_ap(void)
return;
}
- ucode = (void *)__pa_nodebug(&container);
- usize = (size_t *)__pa_nodebug(&container_size);
+ if (!get_builtin_microcode(&cp, family))
+ cp = find_microcode_in_initrd((const char *)__pa_nodebug(ucode_path), true);
- if (!*ucode || !*usize)
+ if (!(cp.data && cp.size))
return;
- apply_ucode_in_initrd(*ucode, *usize, false);
-}
-
-static void __init collect_cpu_sig_on_bsp(void *arg)
-{
- unsigned int cpu = smp_processor_id();
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
- uci->cpu_sig.sig = cpuid_eax(0x00000001);
-}
-
-static void __init get_bsp_sig(void)
-{
- unsigned int bsp = boot_cpu_data.cpu_index;
- struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
-
- if (!uci->cpu_sig.sig)
- smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+ /*
+ * This will populate amd_ucode_patch above so that subsequent APs can
+ * use it directly instead of going down this path again.
+ */
+ apply_microcode_early_amd(cp.data, cp.size, true);
}
#else
-void load_ucode_amd_ap(void)
+void load_ucode_amd_ap(unsigned int family)
{
- unsigned int cpu = smp_processor_id();
struct equiv_cpu_entry *eq;
struct microcode_amd *mc;
- u8 *cont = container;
u32 rev, eax;
u16 eq_id;
- /* Exit if called on the BSP. */
- if (!cpu)
+ /* 64-bit runs with paging enabled, thus early==false. */
+ if (check_current_patch_level(&rev, false))
return;
- if (!container)
- return;
+ /* First AP hasn't cached it yet, go through the blob. */
+ if (!cont.data) {
+ struct cpio_data cp = { NULL, 0, "" };
- /*
- * 64-bit runs with paging enabled, thus early==false.
- */
- if (check_current_patch_level(&rev, false))
- return;
+ if (cont.size == -1)
+ return;
- /* Add CONFIG_RANDOMIZE_MEMORY offset. */
- if (!ucode_builtin)
- cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+reget:
+ if (!get_builtin_microcode(&cp, family)) {
+#ifdef CONFIG_BLK_DEV_INITRD
+ cp = find_cpio_data(ucode_path, (void *)initrd_start,
+ initrd_end - initrd_start, NULL);
+#endif
+ if (!(cp.data && cp.size)) {
+ /*
+ * Mark it so that other APs do not rescan
+ * the initrd and needlessly slow down
+ * boot.
+ */
+ cont.size = -1;
+ return;
+ }
+ }
+
+ cont = apply_microcode_early_amd(cp.data, cp.size, false);
+ if (!(cont.data && cont.size)) {
+ cont.size = -1;
+ return;
+ }
+ }
eax = cpuid_eax(0x00000001);
- eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
+ eq = (struct equiv_cpu_entry *)(cont.data + CONTAINER_HDR_SZ);
eq_id = find_equiv_id(eq, eax);
if (!eq_id)
@@ -397,61 +410,50 @@ void load_ucode_amd_ap(void)
}
} else {
- if (!ucode_cpio.data)
- return;
/*
* AP has a different equivalence ID than BSP, looks like
* mixed-steppings silicon so go through the ucode blob anew.
*/
- apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
+ goto reget;
}
}
-#endif
+#endif /* CONFIG_X86_32 */
+
+static enum ucode_state
+load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
-int __init save_microcode_in_initrd_amd(void)
+int __init save_microcode_in_initrd_amd(unsigned int fam)
{
- unsigned long cont;
- int retval = 0;
enum ucode_state ret;
- u8 *cont_va;
- u32 eax;
+ int retval = 0;
+ u16 eq_id;
- if (!container)
- return -EINVAL;
+ if (!cont.data) {
+ if (IS_ENABLED(CONFIG_X86_32) && (cont.size != -1)) {
+ struct cpio_data cp = { NULL, 0, "" };
-#ifdef CONFIG_X86_32
- get_bsp_sig();
- cont = (unsigned long)container;
- cont_va = __va(container);
-#else
- /*
- * We need the physical address of the container for both bitness since
- * boot_params.hdr.ramdisk_image is a physical address.
- */
- cont = __pa_nodebug(container);
- cont_va = container;
+#ifdef CONFIG_BLK_DEV_INITRD
+ cp = find_cpio_data(ucode_path, (void *)initrd_start,
+ initrd_end - initrd_start, NULL);
#endif
- /*
- * Take into account the fact that the ramdisk might get relocated and
- * therefore we need to recompute the container's position in virtual
- * memory space.
- */
- if (relocated_ramdisk)
- container = (u8 *)(__va(relocated_ramdisk) +
- (cont - boot_params.hdr.ramdisk_image));
- else
- container = cont_va;
+ if (!(cp.data && cp.size)) {
+ cont.size = -1;
+ return -EINVAL;
+ }
- /* Add CONFIG_RANDOMIZE_MEMORY offset. */
- if (!ucode_builtin)
- container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
+ cont = find_proper_container(cp.data, cp.size, &eq_id);
+ if (!eq_id) {
+ cont.size = -1;
+ return -EINVAL;
+ }
- eax = cpuid_eax(0x00000001);
- eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+ } else
+ return -EINVAL;
+ }
- ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
+ ret = load_microcode_amd(smp_processor_id(), fam, cont.data, cont.size);
if (ret != UCODE_OK)
retval = -EINVAL;
@@ -459,8 +461,8 @@ int __init save_microcode_in_initrd_amd(void)
* This will be freed any msec now, stash patches for the current
* family and switch to patch cache for cpu hotplug, etc later.
*/
- container = NULL;
- container_size = 0;
+ cont.data = NULL;
+ cont.size = 0;
return retval;
}
@@ -478,8 +480,10 @@ void reload_ucode_amd(void)
return;
mc = (struct microcode_amd *)amd_ucode_patch;
+ if (!mc)
+ return;
- if (mc && rev < mc->hdr.patch_id) {
+ if (rev < mc->hdr.patch_id) {
if (!__apply_microcode_amd(mc)) {
ucode_new_rev = mc->hdr.patch_id;
pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
@@ -513,7 +517,7 @@ static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
struct ucode_patch *p;
- list_for_each_entry(p, &pcache, plist)
+ list_for_each_entry(p, &microcode_cache, plist)
if (p->equiv_cpu == equiv_cpu)
return p;
return NULL;
@@ -523,7 +527,7 @@ static void update_cache(struct ucode_patch *new_patch)
{
struct ucode_patch *p;
- list_for_each_entry(p, &pcache, plist) {
+ list_for_each_entry(p, &microcode_cache, plist) {
if (p->equiv_cpu == new_patch->equiv_cpu) {
if (p->patch_id >= new_patch->patch_id)
/* we already have the latest patch */
@@ -536,14 +540,14 @@ static void update_cache(struct ucode_patch *new_patch)
}
}
/* no patch found, add it */
- list_add_tail(&new_patch->plist, &pcache);
+ list_add_tail(&new_patch->plist, &microcode_cache);
}
static void free_cache(void)
{
struct ucode_patch *p, *tmp;
- list_for_each_entry_safe(p, tmp, &pcache, plist) {
+ list_for_each_entry_safe(p, tmp, &microcode_cache, plist) {
__list_del(p->plist.prev, p->plist.next);
kfree(p->data);
kfree(p);
@@ -663,21 +667,7 @@ bool check_current_patch_level(u32 *rev, bool early)
return ret;
}
-int __apply_microcode_amd(struct microcode_amd *mc_amd)
-{
- u32 rev, dummy;
-
- native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
-
- /* verify patch application was successful */
- native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
- if (rev != mc_amd->hdr.patch_id)
- return -1;
-
- return 0;
-}
-
-int apply_microcode_amd(int cpu)
+static int apply_microcode_amd(int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_amd *mc_amd;
@@ -860,7 +850,8 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
return UCODE_OK;
}
-enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+static enum ucode_state
+load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
enum ucode_state ret;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 5ce5155f0695..6996413c78c3 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
* 2006 Shaohua Li <shaohua.li@intel.com>
- * 2013-2015 Borislav Petkov <bp@alien8.de>
+ * 2013-2016 Borislav Petkov <bp@alien8.de>
*
* X86 CPU microcode early update for Linux:
*
@@ -39,12 +39,15 @@
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
+#include <asm/setup.h>
-#define MICROCODE_VERSION "2.01"
+#define DRIVER_VERSION "2.2"
static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr;
+LIST_HEAD(microcode_cache);
+
/*
* Synchronization.
*
@@ -167,7 +170,7 @@ void load_ucode_ap(void)
break;
case X86_VENDOR_AMD:
if (family >= 0x10)
- load_ucode_amd_ap();
+ load_ucode_amd_ap(family);
break;
default:
break;
@@ -185,7 +188,7 @@ static int __init save_microcode_in_initrd(void)
break;
case X86_VENDOR_AMD:
if (c->x86 >= 0x10)
- return save_microcode_in_initrd_amd();
+ return save_microcode_in_initrd_amd(c->x86);
break;
default:
break;
@@ -194,6 +197,58 @@ static int __init save_microcode_in_initrd(void)
return -EINVAL;
}
+struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned long start = 0;
+ size_t size;
+
+#ifdef CONFIG_X86_32
+ struct boot_params *params;
+
+ if (use_pa)
+ params = (struct boot_params *)__pa_nodebug(&boot_params);
+ else
+ params = &boot_params;
+
+ size = params->hdr.ramdisk_size;
+
+ /*
+ * Set start only if we have an initrd image. We cannot use initrd_start
+ * because it is not set that early yet.
+ */
+ if (size)
+ start = params->hdr.ramdisk_image;
+
+# else /* CONFIG_X86_64 */
+ size = (unsigned long)boot_params.ext_ramdisk_size << 32;
+ size |= boot_params.hdr.ramdisk_size;
+
+ if (size) {
+ start = (unsigned long)boot_params.ext_ramdisk_image << 32;
+ start |= boot_params.hdr.ramdisk_image;
+
+ start += PAGE_OFFSET;
+ }
+# endif
+
+ /*
+ * Did we relocate the ramdisk?
+ *
+ * The ramdisk can be relocated *after* microcode was applied on the
+ * BSP, so rely on use_pa (use physical addresses) - even if it is not
+ * absolutely correct - to determine whether the ramdisk relocation has
+ * happened already.
+ */
+ if (!use_pa && relocated_ramdisk)
+ start = initrd_start;
+
+ return find_cpio_data(path, (void *)start, size, NULL);
+#else /* !CONFIG_BLK_DEV_INITRD */
+ return (struct cpio_data){ NULL, 0, "" };
+#endif
+}
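A quick worked example of the 64-bit address composition above, with made-up boot_params values:

	start  = ((u64)0x00000001 << 32) | 0x7f000000;	/* = 0x17f000000, physical */
	start += PAGE_OFFSET;				/* virtual address passed to find_cpio_data() */

ext_ramdisk_size and ramdisk_size combine into size the same way.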
+
void reload_early_microcode(void)
{
int vendor, family;
@@ -453,16 +508,17 @@ static struct attribute_group mc_attr_group = {
static void microcode_fini_cpu(int cpu)
{
- microcode_ops->microcode_fini_cpu(cpu);
+ if (microcode_ops->microcode_fini_cpu)
+ microcode_ops->microcode_fini_cpu(cpu);
}
static enum ucode_state microcode_resume_cpu(int cpu)
{
- pr_debug("CPU%d updated upon resume\n", cpu);
-
if (apply_microcode_on_target(cpu))
return UCODE_ERROR;
+ pr_debug("CPU%d updated upon resume\n", cpu);
+
return UCODE_OK;
}
@@ -496,6 +552,9 @@ static enum ucode_state microcode_update_cpu(int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ /* Refresh CPU microcode revision after resume. */
+ collect_cpu_info(cpu);
+
if (uci->valid)
return microcode_resume_cpu(cpu);
@@ -579,12 +638,7 @@ static int mc_cpu_down_prep(unsigned int cpu)
/* Suspend is in progress, only remove the interface */
sysfs_remove_group(&dev->kobj, &mc_attr_group);
pr_debug("CPU%d removed\n", cpu);
- /*
- * When a CPU goes offline, don't free up or invalidate the copy of
- * the microcode in kernel memory, so that we can reuse it when the
- * CPU comes back online without unnecessarily requesting the userspace
- * for it again.
- */
+
return 0;
}
@@ -649,8 +703,7 @@ int __init microcode_init(void)
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep);
- pr_info("Microcode Update Driver: v" MICROCODE_VERSION
- " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
+ pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
return 0;
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index cdc0deab00c9..54d50c3694d8 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -39,125 +39,83 @@
#include <asm/setup.h>
#include <asm/msr.h>
-/*
- * Temporary microcode blobs pointers storage. We note here during early load
- * the pointers to microcode blobs we've got from whatever storage (detached
- * initrd, builtin). Later on, we put those into final storage
- * mc_saved_data.mc_saved.
- *
- * Important: those are offsets from the beginning of initrd or absolute
- * addresses within the kernel image when built-in.
- */
-static unsigned long mc_tmp_ptrs[MAX_UCODE_COUNT];
-
-static struct mc_saved_data {
- unsigned int num_saved;
- struct microcode_intel **mc_saved;
-} mc_saved_data;
+static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
-/* Microcode blobs within the initrd. 0 if builtin. */
-static struct ucode_blobs {
- unsigned long start;
- bool valid;
-} blobs;
+/* Current microcode patch used in early patching */
+struct microcode_intel *intel_ucode_patch;
-/* Go through saved patches and find the one suitable for the current CPU. */
-static enum ucode_state
-find_microcode_patch(struct microcode_intel **saved,
- unsigned int num_saved, struct ucode_cpu_info *uci)
+static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
+ unsigned int s2, unsigned int p2)
{
- struct microcode_intel *ucode_ptr, *new_mc = NULL;
- struct microcode_header_intel *mc_hdr;
- int new_rev, ret, i;
-
- new_rev = uci->cpu_sig.rev;
-
- for (i = 0; i < num_saved; i++) {
- ucode_ptr = saved[i];
- mc_hdr = (struct microcode_header_intel *)ucode_ptr;
-
- ret = has_newer_microcode(ucode_ptr,
- uci->cpu_sig.sig,
- uci->cpu_sig.pf,
- new_rev);
- if (!ret)
- continue;
-
- new_rev = mc_hdr->rev;
- new_mc = ucode_ptr;
- }
+ if (s1 != s2)
+ return false;
- if (!new_mc)
- return UCODE_NFOUND;
+ /* Processor flags are either both 0 ... */
+ if (!p1 && !p2)
+ return true;
- uci->mc = (struct microcode_intel *)new_mc;
- return UCODE_OK;
+ /* ... or they intersect. */
+ return p1 & p2;
}
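A quick illustration of the rule above with made-up values (the processor-flags word is effectively a bitmask of platform ids, so "intersect" means the bitwise AND is non-zero):

	cpu_signatures_match(0x000306c3, 0x04, 0x000306c3, 0x36);	/* true:  sigs equal, 0x04 & 0x36 != 0 */
	cpu_signatures_match(0x000306c3, 0x04, 0x000306c3, 0x01);	/* false: the masks do not intersect   */
	cpu_signatures_match(0x000306c3, 0x00, 0x000306c3, 0x00);	/* true:  both flag words are 0        */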
-static inline void
-copy_ptrs(struct microcode_intel **mc_saved, unsigned long *mc_ptrs,
- unsigned long off, int num_saved)
+/*
+ * Returns 1 if an update has been found, 0 otherwise.
+ */
+static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
+ struct microcode_header_intel *mc_hdr = mc;
+ struct extended_sigtable *ext_hdr;
+ struct extended_signature *ext_sig;
int i;
- for (i = 0; i < num_saved; i++)
- mc_saved[i] = (struct microcode_intel *)(mc_ptrs[i] + off);
-}
-
-#ifdef CONFIG_X86_32
-static void
-microcode_phys(struct microcode_intel **mc_saved_tmp, struct mc_saved_data *mcs)
-{
- int i;
- struct microcode_intel ***mc_saved;
+ if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
+ return 1;
- mc_saved = (struct microcode_intel ***)__pa_nodebug(&mcs->mc_saved);
+ /* Look for ext. headers: */
+ if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
+ return 0;
- for (i = 0; i < mcs->num_saved; i++) {
- struct microcode_intel *p;
+ ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
+ ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
- p = *(struct microcode_intel **)__pa_nodebug(mcs->mc_saved + i);
- mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
+ for (i = 0; i < ext_hdr->count; i++) {
+ if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
+ return 1;
+ ext_sig++;
}
+ return 0;
}
-#endif
-static enum ucode_state
-load_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- unsigned long offset, struct ucode_cpu_info *uci)
+/*
+ * Returns 1 if an update has been found, 0 otherwise.
+ */
+static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
- struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
- unsigned int count = mcs->num_saved;
+ struct microcode_header_intel *mc_hdr = mc;
- if (!mcs->mc_saved) {
- copy_ptrs(mc_saved_tmp, mc_ptrs, offset, count);
+ if (mc_hdr->rev <= new_rev)
+ return 0;
- return find_microcode_patch(mc_saved_tmp, count, uci);
- } else {
-#ifdef CONFIG_X86_32
- microcode_phys(mc_saved_tmp, mcs);
- return find_microcode_patch(mc_saved_tmp, count, uci);
-#else
- return find_microcode_patch(mcs->mc_saved, count, uci);
-#endif
- }
+ return find_matching_signature(mc, csig, cpf);
}
/*
* Given a CPU signature and a microcode patch, this function finds out whether
* the microcode patch has a family and model matching the CPU's.
+ *
+ * %true - if there's a match
+ * %false - otherwise
*/
-static enum ucode_state
-matching_model_microcode(struct microcode_header_intel *mc_header,
- unsigned long sig)
+static bool microcode_matches(struct microcode_header_intel *mc_header,
+ unsigned long sig)
{
- unsigned int fam, model;
- unsigned int fam_ucode, model_ucode;
- struct extended_sigtable *ext_header;
unsigned long total_size = get_totalsize(mc_header);
unsigned long data_size = get_datasize(mc_header);
- int ext_sigcount, i;
+ struct extended_sigtable *ext_header;
+ unsigned int fam_ucode, model_ucode;
struct extended_signature *ext_sig;
+ unsigned int fam, model;
+ int ext_sigcount, i;
fam = x86_family(sig);
model = x86_model(sig);
@@ -166,11 +124,11 @@ matching_model_microcode(struct microcode_header_intel *mc_header,
model_ucode = x86_model(mc_header->sig);
if (fam == fam_ucode && model == model_ucode)
- return UCODE_OK;
+ return true;
/* Look for ext. headers: */
if (total_size <= data_size + MC_HEADER_SIZE)
- return UCODE_NFOUND;
+ return false;
ext_header = (void *) mc_header + data_size + MC_HEADER_SIZE;
ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
@@ -181,192 +139,242 @@ matching_model_microcode(struct microcode_header_intel *mc_header,
model_ucode = x86_model(ext_sig->sig);
if (fam == fam_ucode && model == model_ucode)
- return UCODE_OK;
+ return true;
ext_sig++;
}
- return UCODE_NFOUND;
+ return false;
}
-static int
-save_microcode(struct mc_saved_data *mcs,
- struct microcode_intel **mc_saved_src,
- unsigned int num_saved)
+static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
{
- int i, j;
- struct microcode_intel **saved_ptr;
- int ret;
+ struct ucode_patch *p;
- if (!num_saved)
- return -EINVAL;
+ p = kzalloc(size, GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
- /*
- * Copy new microcode data.
- */
- saved_ptr = kcalloc(num_saved, sizeof(struct microcode_intel *), GFP_KERNEL);
- if (!saved_ptr)
- return -ENOMEM;
-
- for (i = 0; i < num_saved; i++) {
- struct microcode_header_intel *mc_hdr;
- struct microcode_intel *mc;
- unsigned long size;
-
- if (!mc_saved_src[i]) {
- ret = -EINVAL;
- goto err;
- }
+ p->data = kmemdup(data, size, GFP_KERNEL);
+ if (!p->data) {
+ kfree(p);
+ return ERR_PTR(-ENOMEM);
+ }
- mc = mc_saved_src[i];
- mc_hdr = &mc->hdr;
- size = get_totalsize(mc_hdr);
+ return p;
+}
- saved_ptr[i] = kmemdup(mc, size, GFP_KERNEL);
- if (!saved_ptr[i]) {
- ret = -ENOMEM;
- goto err;
+static void save_microcode_patch(void *data, unsigned int size)
+{
+ struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
+ struct ucode_patch *iter, *tmp, *p;
+ bool prev_found = false;
+ unsigned int sig, pf;
+
+ mc_hdr = (struct microcode_header_intel *)data;
+
+ list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
+ mc_saved_hdr = (struct microcode_header_intel *)iter->data;
+ sig = mc_saved_hdr->sig;
+ pf = mc_saved_hdr->pf;
+
+ if (find_matching_signature(data, sig, pf)) {
+ prev_found = true;
+
+ if (mc_hdr->rev <= mc_saved_hdr->rev)
+ continue;
+
+ p = __alloc_microcode_buf(data, size);
+ if (IS_ERR(p))
+ pr_err("Error allocating buffer %p\n", data);
+ else
+ list_replace(&iter->plist, &p->plist);
}
}
/*
- * Point to newly saved microcode.
+ * There weren't any previous patches found in the list cache; save the
+ * newly found one.
*/
- mcs->mc_saved = saved_ptr;
- mcs->num_saved = num_saved;
-
- return 0;
-
-err:
- for (j = 0; j <= i; j++)
- kfree(saved_ptr[j]);
- kfree(saved_ptr);
-
- return ret;
+ if (!prev_found) {
+ p = __alloc_microcode_buf(data, size);
+ if (IS_ERR(p))
+ pr_err("Error allocating buffer for %p\n", data);
+ else
+ list_add_tail(&p->plist, &microcode_cache);
+ }
}
-/*
- * A microcode patch in ucode_ptr is saved into mc_saved
- * - if it has matching signature and newer revision compared to an existing
- * patch mc_saved.
- * - or if it is a newly discovered microcode patch.
- *
- * The microcode patch should have matching model with CPU.
- *
- * Returns: The updated number @num_saved of saved microcode patches.
- */
-static unsigned int _save_mc(struct microcode_intel **mc_saved,
- u8 *ucode_ptr, unsigned int num_saved)
+static int microcode_sanity_check(void *mc, int print_err)
{
- struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
- unsigned int sig, pf;
- int found = 0, i;
+ unsigned long total_size, data_size, ext_table_size;
+ struct microcode_header_intel *mc_header = mc;
+ struct extended_sigtable *ext_header = NULL;
+ u32 sum, orig_sum, ext_sigcount = 0, i;
+ struct extended_signature *ext_sig;
- mc_hdr = (struct microcode_header_intel *)ucode_ptr;
+ total_size = get_totalsize(mc_header);
+ data_size = get_datasize(mc_header);
- for (i = 0; i < num_saved; i++) {
- mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
- sig = mc_saved_hdr->sig;
- pf = mc_saved_hdr->pf;
+ if (data_size + MC_HEADER_SIZE > total_size) {
+ if (print_err)
+ pr_err("Error: bad microcode data file size.\n");
+ return -EINVAL;
+ }
- if (!find_matching_signature(ucode_ptr, sig, pf))
- continue;
+ if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
+ if (print_err)
+ pr_err("Error: invalid/unknown microcode update format.\n");
+ return -EINVAL;
+ }
- found = 1;
+ ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
+ if (ext_table_size) {
+ u32 ext_table_sum = 0;
+ u32 *ext_tablep;
- if (mc_hdr->rev <= mc_saved_hdr->rev)
- continue;
+ if ((ext_table_size < EXT_HEADER_SIZE)
+ || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
+ if (print_err)
+ pr_err("Error: truncated extended signature table.\n");
+ return -EINVAL;
+ }
+
+ ext_header = mc + MC_HEADER_SIZE + data_size;
+ if (ext_table_size != exttable_size(ext_header)) {
+ if (print_err)
+ pr_err("Error: extended signature table size mismatch.\n");
+ return -EFAULT;
+ }
+
+ ext_sigcount = ext_header->count;
/*
- * Found an older ucode saved earlier. Replace it with
- * this newer one.
+ * Check extended table checksum: the sum of all dwords that
+ * comprise a valid table must be 0.
*/
- mc_saved[i] = (struct microcode_intel *)ucode_ptr;
- break;
+ ext_tablep = (u32 *)ext_header;
+
+ i = ext_table_size / sizeof(u32);
+ while (i--)
+ ext_table_sum += ext_tablep[i];
+
+ if (ext_table_sum) {
+ if (print_err)
+ pr_warn("Bad extended signature table checksum, aborting.\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Calculate the checksum of update data and header. The checksum of
+ * valid update data and header including the extended signature table
+ * must be 0.
+ */
+ orig_sum = 0;
+ i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
+ while (i--)
+ orig_sum += ((u32 *)mc)[i];
+
+ if (orig_sum) {
+ if (print_err)
+ pr_err("Bad microcode data checksum, aborting.\n");
+ return -EINVAL;
}
- /* Newly detected microcode, save it to memory. */
- if (i >= num_saved && !found)
- mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;
+ if (!ext_table_size)
+ return 0;
- return num_saved;
+ /*
+ * Check extended signature checksum: 0 => valid.
+ */
+ for (i = 0; i < ext_sigcount; i++) {
+ ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
+ EXT_SIGNATURE_SIZE * i;
+
+ sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
+ (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
+ if (sum) {
+ if (print_err)
+ pr_err("Bad extended signature checksum, aborting.\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
}
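The checksum convention enforced above boils down to one invariant: every 32-bit word of header plus data, including the stored cksum field, must sum to zero. A standalone sketch of that check (not kernel code):

#include <stdint.h>
#include <stddef.h>

/* Sum all 32-bit words of a microcode image.  A valid image sums to 0
 * because the producer stores cksum = -(sum of every other dword). */
static uint32_t mc_dword_sum(const uint32_t *mc, size_t bytes)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < bytes / sizeof(uint32_t); i++)
		sum += mc[i];

	return sum;	/* 0 => checksum OK */
}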
/*
* Get microcode matching the BSP's model. Only CPUs with the same model as
* the BSP can stay in the platform.
*/
-static enum ucode_state __init
-get_matching_model_microcode(unsigned long start, void *data, size_t size,
- struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- struct ucode_cpu_info *uci)
+static struct microcode_intel *
+scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
- struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
struct microcode_header_intel *mc_header;
- unsigned int num_saved = mcs->num_saved;
- enum ucode_state state = UCODE_OK;
- unsigned int leftover = size;
- u8 *ucode_ptr = data;
+ struct microcode_intel *patch = NULL;
unsigned int mc_size;
- int i;
-
- while (leftover && num_saved < ARRAY_SIZE(mc_saved_tmp)) {
- if (leftover < sizeof(mc_header))
+ while (size) {
+ if (size < sizeof(struct microcode_header_intel))
break;
- mc_header = (struct microcode_header_intel *)ucode_ptr;
+ mc_header = (struct microcode_header_intel *)data;
mc_size = get_totalsize(mc_header);
- if (!mc_size || mc_size > leftover ||
- microcode_sanity_check(ucode_ptr, 0) < 0)
+ if (!mc_size ||
+ mc_size > size ||
+ microcode_sanity_check(data, 0) < 0)
break;
- leftover -= mc_size;
+ size -= mc_size;
- /*
- * Since APs with same family and model as the BSP may boot in
- * the platform, we need to find and save microcode patches
- * with the same family and model as the BSP.
- */
- if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != UCODE_OK) {
- ucode_ptr += mc_size;
+ if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
+ data += mc_size;
continue;
}
- num_saved = _save_mc(mc_saved_tmp, ucode_ptr, num_saved);
+ if (save) {
+ save_microcode_patch(data, mc_size);
+ goto next;
+ }
- ucode_ptr += mc_size;
- }
- if (leftover) {
- state = UCODE_ERROR;
- return state;
- }
+ if (!patch) {
+ if (!has_newer_microcode(data,
+ uci->cpu_sig.sig,
+ uci->cpu_sig.pf,
+ uci->cpu_sig.rev))
+ goto next;
- if (!num_saved) {
- state = UCODE_NFOUND;
- return state;
- }
+ } else {
+ struct microcode_header_intel *phdr = &patch->hdr;
- for (i = 0; i < num_saved; i++)
- mc_ptrs[i] = (unsigned long)mc_saved_tmp[i] - start;
+ if (!has_newer_microcode(data,
+ phdr->sig,
+ phdr->pf,
+ phdr->rev))
+ goto next;
+ }
+
+ /* We have a newer patch, save it. */
+ patch = data;
- mcs->num_saved = num_saved;
+next:
+ data += mc_size;
+ }
- return state;
+ if (size)
+ return NULL;
+
+ return patch;
}
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
unsigned int val[2];
unsigned int family, model;
- struct cpu_signature csig;
+ struct cpu_signature csig = { 0 };
unsigned int eax, ebx, ecx, edx;
- csig.sig = 0;
- csig.pf = 0;
- csig.rev = 0;
-
memset(uci, 0, sizeof(*uci));
eax = 0x00000001;
@@ -374,8 +382,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
native_cpuid(&eax, &ebx, &ecx, &edx);
csig.sig = eax;
- family = x86_family(csig.sig);
- model = x86_model(csig.sig);
+ family = x86_family(eax);
+ model = x86_model(eax);
if ((model >= 5) || (family > 6)) {
/* get processor flags from MSR 0x17 */
@@ -401,40 +409,41 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
static void show_saved_mc(void)
{
#ifdef DEBUG
- int i, j;
+ int i = 0, j;
unsigned int sig, pf, rev, total_size, data_size, date;
struct ucode_cpu_info uci;
+ struct ucode_patch *p;
- if (!mc_saved_data.num_saved) {
+ if (list_empty(&microcode_cache)) {
pr_debug("no microcode data saved.\n");
return;
}
- pr_debug("Total microcode saved: %d\n", mc_saved_data.num_saved);
collect_cpu_info_early(&uci);
- sig = uci.cpu_sig.sig;
- pf = uci.cpu_sig.pf;
- rev = uci.cpu_sig.rev;
+ sig = uci.cpu_sig.sig;
+ pf = uci.cpu_sig.pf;
+ rev = uci.cpu_sig.rev;
pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
- for (i = 0; i < mc_saved_data.num_saved; i++) {
+ list_for_each_entry(p, &microcode_cache, plist) {
struct microcode_header_intel *mc_saved_header;
struct extended_sigtable *ext_header;
- int ext_sigcount;
struct extended_signature *ext_sig;
+ int ext_sigcount;
+
+ mc_saved_header = (struct microcode_header_intel *)p->data;
- mc_saved_header = (struct microcode_header_intel *)
- mc_saved_data.mc_saved[i];
- sig = mc_saved_header->sig;
- pf = mc_saved_header->pf;
- rev = mc_saved_header->rev;
- total_size = get_totalsize(mc_saved_header);
- data_size = get_datasize(mc_saved_header);
- date = mc_saved_header->date;
+ sig = mc_saved_header->sig;
+ pf = mc_saved_header->pf;
+ rev = mc_saved_header->rev;
+ date = mc_saved_header->date;
+
+ total_size = get_totalsize(mc_saved_header);
+ data_size = get_datasize(mc_saved_header);
pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
- i, sig, pf, rev, total_size,
+ i++, sig, pf, rev, total_size,
date & 0xffff,
date >> 24,
(date >> 16) & 0xff);
@@ -443,7 +452,7 @@ static void show_saved_mc(void)
if (total_size <= data_size + MC_HEADER_SIZE)
continue;
- ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
+ ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
ext_sigcount = ext_header->count;
ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
@@ -456,85 +465,43 @@ static void show_saved_mc(void)
ext_sig++;
}
-
}
#endif
}
/*
- * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
- * hot added or resumes.
- *
- * Please make sure this mc should be a valid microcode patch before calling
- * this function.
+ * Save this microcode patch. It will be loaded early when a CPU is
+ * hot-added or resumes.
*/
-static void save_mc_for_early(u8 *mc)
+static void save_mc_for_early(u8 *mc, unsigned int size)
{
#ifdef CONFIG_HOTPLUG_CPU
/* Synchronization during CPU hotplug. */
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
- struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
- unsigned int mc_saved_count_init;
- unsigned int num_saved;
- struct microcode_intel **mc_saved;
- int ret, i;
-
mutex_lock(&x86_cpu_microcode_mutex);
- mc_saved_count_init = mc_saved_data.num_saved;
- num_saved = mc_saved_data.num_saved;
- mc_saved = mc_saved_data.mc_saved;
-
- if (mc_saved && num_saved)
- memcpy(mc_saved_tmp, mc_saved,
- num_saved * sizeof(struct microcode_intel *));
- /*
- * Save the microcode patch mc in mc_save_tmp structure if it's a newer
- * version.
- */
- num_saved = _save_mc(mc_saved_tmp, mc, num_saved);
-
- /*
- * Save the mc_save_tmp in global mc_saved_data.
- */
- ret = save_microcode(&mc_saved_data, mc_saved_tmp, num_saved);
- if (ret) {
- pr_err("Cannot save microcode patch.\n");
- goto out;
- }
-
+ save_microcode_patch(mc, size);
show_saved_mc();
- /*
- * Free old saved microcode data.
- */
- if (mc_saved) {
- for (i = 0; i < mc_saved_count_init; i++)
- kfree(mc_saved[i]);
- kfree(mc_saved);
- }
-
-out:
mutex_unlock(&x86_cpu_microcode_mutex);
#endif
}
-static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
+static bool load_builtin_intel_microcode(struct cpio_data *cp)
{
-#ifdef CONFIG_X86_64
- unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
+ unsigned int eax = 1, ebx, ecx = 0, edx;
char name[30];
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;
+
native_cpuid(&eax, &ebx, &ecx, &edx);
sprintf(name, "intel-ucode/%02x-%02x-%02x",
x86_family(eax), x86_model(eax), x86_stepping(eax));
return get_builtin_firmware(cp, name);
-#else
- return false;
-#endif
}
/*
@@ -570,8 +537,7 @@ void show_ucode_info_early(void)
}
/*
- * At this point, we can not call printk() yet. Keep microcode patch number in
- * mc_saved_data.mc_saved and delay printing microcode info in
+ * At this point, we cannot call printk() yet. Delay printing microcode info in
* show_ucode_info_early() until printk() works.
*/
static void print_ucode(struct ucode_cpu_info *uci)
@@ -648,206 +614,140 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
return 0;
}
-/*
- * This function converts microcode patch offsets previously stored in
- * mc_tmp_ptrs to pointers and stores the pointers in mc_saved_data.
- */
int __init save_microcode_in_initrd_intel(void)
{
- struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
- unsigned int count = mc_saved_data.num_saved;
- unsigned long offset = 0;
- int ret;
-
- if (!count)
- return 0;
+ struct ucode_cpu_info uci;
+ struct cpio_data cp;
/*
- * We have found a valid initrd but it might've been relocated in the
- * meantime so get its updated address.
+ * AP loading didn't find any microcode patch, so there is nothing to save.
*/
- if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && blobs.valid)
- offset = initrd_start;
-
- copy_ptrs(mc_saved, mc_tmp_ptrs, offset, count);
+ if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
+ return 0;
- ret = save_microcode(&mc_saved_data, mc_saved, count);
- if (ret)
- pr_err("Cannot save microcode patches from initrd.\n");
- else
- show_saved_mc();
+ if (!load_builtin_intel_microcode(&cp))
+ cp = find_microcode_in_initrd(ucode_path, false);
- return ret;
-}
+ if (!(cp.data && cp.size))
+ return 0;
-static __init enum ucode_state
-__scan_microcode_initrd(struct cpio_data *cd, struct ucode_blobs *blbp)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
- char *p = IS_ENABLED(CONFIG_X86_32) ? (char *)__pa_nodebug(ucode_name)
- : ucode_name;
-# ifdef CONFIG_X86_32
- unsigned long start = 0, size;
- struct boot_params *params;
+ collect_cpu_info_early(&uci);
- params = (struct boot_params *)__pa_nodebug(&boot_params);
- size = params->hdr.ramdisk_size;
+ scan_microcode(cp.data, cp.size, &uci, true);
- /*
- * Set start only if we have an initrd image. We cannot use initrd_start
- * because it is not set that early yet.
- */
- start = (size ? params->hdr.ramdisk_image : 0);
+ show_saved_mc();
-# else /* CONFIG_X86_64 */
- unsigned long start = 0, size;
+ return 0;
+}
- size = (u64)boot_params.ext_ramdisk_size << 32;
- size |= boot_params.hdr.ramdisk_size;
- if (size) {
- start = (u64)boot_params.ext_ramdisk_image << 32;
- start |= boot_params.hdr.ramdisk_image;
+/*
+ * Returns a pointer to the microcode patch we found, or NULL otherwise.
+ */
+static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
+{
+ static const char *path;
+ struct cpio_data cp;
+ bool use_pa;
- start += PAGE_OFFSET;
+ if (IS_ENABLED(CONFIG_X86_32)) {
+ path = (const char *)__pa_nodebug(ucode_path);
+ use_pa = true;
+ } else {
+ path = ucode_path;
+ use_pa = false;
}
-# endif
-
- *cd = find_cpio_data(p, (void *)start, size, NULL);
- if (cd->data) {
- blbp->start = start;
- blbp->valid = true;
- return UCODE_OK;
- } else
-#endif /* CONFIG_BLK_DEV_INITRD */
- return UCODE_ERROR;
-}
+ /* try built-in microcode first */
+ if (!load_builtin_intel_microcode(&cp))
+ cp = find_microcode_in_initrd(path, use_pa);
-static __init enum ucode_state
-scan_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- struct ucode_cpu_info *uci, struct ucode_blobs *blbp)
-{
- struct cpio_data cd = { NULL, 0, "" };
- enum ucode_state ret;
+ if (!(cp.data && cp.size))
+ return NULL;
- /* try built-in microcode first */
- if (load_builtin_intel_microcode(&cd))
- /*
- * Invalidate blobs as we might've gotten an initrd too,
- * supplied by the boot loader, by mistake or simply forgotten
- * there. That's fine, we ignore it since we've found builtin
- * microcode already.
- */
- blbp->valid = false;
- else {
- ret = __scan_microcode_initrd(&cd, blbp);
- if (ret != UCODE_OK)
- return ret;
- }
+ collect_cpu_info_early(uci);
- return get_matching_model_microcode(blbp->start, cd.data, cd.size,
- mcs, mc_ptrs, uci);
+ return scan_microcode(cp.data, cp.size, uci, false);
}
-static void __init
-_load_ucode_intel_bsp(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
- struct ucode_blobs *blbp)
+void __init load_ucode_intel_bsp(void)
{
+ struct microcode_intel *patch;
struct ucode_cpu_info uci;
- enum ucode_state ret;
-
- collect_cpu_info_early(&uci);
- ret = scan_microcode(mcs, mc_ptrs, &uci, blbp);
- if (ret != UCODE_OK)
+ patch = __load_ucode_intel(&uci);
+ if (!patch)
return;
- ret = load_microcode(mcs, mc_ptrs, blbp->start, &uci);
- if (ret != UCODE_OK)
- return;
+ uci.mc = patch;
apply_microcode_early(&uci, true);
}
-void __init load_ucode_intel_bsp(void)
+void load_ucode_intel_ap(void)
{
- struct ucode_blobs *blobs_p;
- struct mc_saved_data *mcs;
- unsigned long *ptrs;
+ struct microcode_intel *patch, **iup;
+ struct ucode_cpu_info uci;
-#ifdef CONFIG_X86_32
- mcs = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
- ptrs = (unsigned long *)__pa_nodebug(&mc_tmp_ptrs);
- blobs_p = (struct ucode_blobs *)__pa_nodebug(&blobs);
-#else
- mcs = &mc_saved_data;
- ptrs = mc_tmp_ptrs;
- blobs_p = &blobs;
-#endif
+ if (IS_ENABLED(CONFIG_X86_32))
+ iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch);
+ else
+ iup = &intel_ucode_patch;
+
+reget:
+ if (!*iup) {
+ patch = __load_ucode_intel(&uci);
+ if (!patch)
+ return;
+
+ *iup = patch;
+ }
- _load_ucode_intel_bsp(mcs, ptrs, blobs_p);
+ uci.mc = *iup;
+
+ if (apply_microcode_early(&uci, true)) {
+ /* Mixed-silicon system? Try to refetch the proper patch: */
+ *iup = NULL;
+
+ goto reget;
+ }
}
-void load_ucode_intel_ap(void)
+static struct microcode_intel *find_patch(struct ucode_cpu_info *uci)
{
- struct ucode_blobs *blobs_p;
- unsigned long *ptrs, start = 0;
- struct mc_saved_data *mcs;
- struct ucode_cpu_info uci;
- enum ucode_state ret;
+ struct microcode_header_intel *phdr;
+ struct ucode_patch *iter, *tmp;
-#ifdef CONFIG_X86_32
- mcs = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
- ptrs = (unsigned long *)__pa_nodebug(mc_tmp_ptrs);
- blobs_p = (struct ucode_blobs *)__pa_nodebug(&blobs);
-#else
- mcs = &mc_saved_data;
- ptrs = mc_tmp_ptrs;
- blobs_p = &blobs;
-#endif
-
- /*
- * If there is no valid ucode previously saved in memory, no need to
- * update ucode on this AP.
- */
- if (!mcs->num_saved)
- return;
+ list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) {
- if (blobs_p->valid) {
- start = blobs_p->start;
+ phdr = (struct microcode_header_intel *)iter->data;
- /*
- * Pay attention to CONFIG_RANDOMIZE_MEMORY=y as it shuffles
- * physmem mapping too and there we have the initrd.
- */
- start += PAGE_OFFSET - __PAGE_OFFSET_BASE;
- }
+ if (phdr->rev <= uci->cpu_sig.rev)
+ continue;
- collect_cpu_info_early(&uci);
- ret = load_microcode(mcs, ptrs, start, &uci);
- if (ret != UCODE_OK)
- return;
+ if (!find_matching_signature(phdr,
+ uci->cpu_sig.sig,
+ uci->cpu_sig.pf))
+ continue;
- apply_microcode_early(&uci, true);
+ return iter->data;
+ }
+ return NULL;
}
void reload_ucode_intel(void)
{
+ struct microcode_intel *p;
struct ucode_cpu_info uci;
- enum ucode_state ret;
-
- if (!mc_saved_data.num_saved)
- return;
collect_cpu_info_early(&uci);
- ret = find_microcode_patch(mc_saved_data.mc_saved,
- mc_saved_data.num_saved, &uci);
- if (ret != UCODE_OK)
+ p = find_patch(&uci);
+ if (!p)
return;
+ uci.mc = p;
+
apply_microcode_early(&uci, false);
}
@@ -879,24 +779,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
return 0;
}
-/*
- * return 0 - no update found
- * return 1 - found update
- */
-static int get_matching_mc(struct microcode_intel *mc, int cpu)
-{
- struct cpu_signature cpu_sig;
- unsigned int csig, cpf, crev;
-
- collect_cpu_info(cpu, &cpu_sig);
-
- csig = cpu_sig.sig;
- cpf = cpu_sig.pf;
- crev = cpu_sig.rev;
-
- return has_newer_microcode(mc, csig, cpf, crev);
-}
-
static int apply_microcode_intel(int cpu)
{
struct microcode_intel *mc;
@@ -911,16 +793,12 @@ static int apply_microcode_intel(int cpu)
uci = ucode_cpu_info + cpu;
mc = uci->mc;
- if (!mc)
- return 0;
-
- /*
- * Microcode on this CPU could be updated earlier. Only apply the
- * microcode patch in mc when it is newer than the one on this
- * CPU.
- */
- if (!get_matching_mc(mc, cpu))
- return 0;
+ if (!mc) {
+ /* Look for a newer patch in our cache: */
+ mc = find_patch(uci);
+ if (!mc)
+ return 0;
+ }
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
@@ -962,7 +840,6 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
int new_rev = uci->cpu_sig.rev;
unsigned int leftover = size;
- enum ucode_state state = UCODE_OK;
unsigned int curr_mc_size = 0;
unsigned int csig, cpf;
@@ -1015,14 +892,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
if (leftover) {
vfree(new_mc);
- state = UCODE_ERROR;
- goto out;
+ return UCODE_ERROR;
}
- if (!new_mc) {
- state = UCODE_NFOUND;
- goto out;
- }
+ if (!new_mc)
+ return UCODE_NFOUND;
vfree(uci->mc);
uci->mc = (struct microcode_intel *)new_mc;
@@ -1032,12 +906,12 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
* permanent memory. So it will be loaded early when a CPU is hot added
* or resumes.
*/
- save_mc_for_early(new_mc);
+ save_mc_for_early(new_mc, curr_mc_size);
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
-out:
- return state;
+
+ return UCODE_OK;
}
static int get_ucode_fw(void *to, const void *from, size_t n)
@@ -1081,20 +955,11 @@ request_microcode_user(int cpu, const void __user *buf, size_t size)
return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}
-static void microcode_fini_cpu(int cpu)
-{
- struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-
- vfree(uci->mc);
- uci->mc = NULL;
-}
-
static struct microcode_ops microcode_intel_ops = {
.request_microcode_user = request_microcode_user,
.request_microcode_fw = request_microcode_fw,
.collect_cpu_info = collect_cpu_info,
.apply_microcode = apply_microcode_intel,
- .microcode_fini_cpu = microcode_fini_cpu,
};
struct microcode_ops * __init init_intel_microcode(void)
@@ -1109,4 +974,3 @@ struct microcode_ops * __init init_intel_microcode(void)
return &microcode_intel_ops;
}
-
diff --git a/arch/x86/kernel/cpu/microcode/intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c
deleted file mode 100644
index 406cb6c0d9dd..000000000000
--- a/arch/x86/kernel/cpu/microcode/intel_lib.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Intel CPU Microcode Update Driver for Linux
- *
- * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
- * H Peter Anvin" <hpa@zytor.com>
- *
- * This driver allows to upgrade microcode on Intel processors
- * belonging to IA-32 family - PentiumPro, Pentium II,
- * Pentium III, Xeon, Pentium 4, etc.
- *
- * Reference: Section 8.11 of Volume 3a, IA-32 Intel® Architecture
- * Software Developer's Manual
- * Order Number 253668 or free download from:
- *
- * http://developer.intel.com/Assets/PDF/manual/253668.pdf
- *
- * For more information, go to http://www.urbanmyth.org/microcode
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-#include <linux/firmware.h>
-#include <linux/uaccess.h>
-#include <linux/kernel.h>
-
-#include <asm/microcode_intel.h>
-#include <asm/processor.h>
-#include <asm/msr.h>
-
-static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
- unsigned int s2, unsigned int p2)
-{
- if (s1 != s2)
- return false;
-
- /* Processor flags are either both 0 ... */
- if (!p1 && !p2)
- return true;
-
- /* ... or they intersect. */
- return p1 & p2;
-}
-
-int microcode_sanity_check(void *mc, int print_err)
-{
- unsigned long total_size, data_size, ext_table_size;
- struct microcode_header_intel *mc_header = mc;
- struct extended_sigtable *ext_header = NULL;
- u32 sum, orig_sum, ext_sigcount = 0, i;
- struct extended_signature *ext_sig;
-
- total_size = get_totalsize(mc_header);
- data_size = get_datasize(mc_header);
-
- if (data_size + MC_HEADER_SIZE > total_size) {
- if (print_err)
- pr_err("Error: bad microcode data file size.\n");
- return -EINVAL;
- }
-
- if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
- if (print_err)
- pr_err("Error: invalid/unknown microcode update format.\n");
- return -EINVAL;
- }
-
- ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
- if (ext_table_size) {
- u32 ext_table_sum = 0;
- u32 *ext_tablep;
-
- if ((ext_table_size < EXT_HEADER_SIZE)
- || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
- if (print_err)
- pr_err("Error: truncated extended signature table.\n");
- return -EINVAL;
- }
-
- ext_header = mc + MC_HEADER_SIZE + data_size;
- if (ext_table_size != exttable_size(ext_header)) {
- if (print_err)
- pr_err("Error: extended signature table size mismatch.\n");
- return -EFAULT;
- }
-
- ext_sigcount = ext_header->count;
-
- /*
- * Check extended table checksum: the sum of all dwords that
- * comprise a valid table must be 0.
- */
- ext_tablep = (u32 *)ext_header;
-
- i = ext_table_size / sizeof(u32);
- while (i--)
- ext_table_sum += ext_tablep[i];
-
- if (ext_table_sum) {
- if (print_err)
- pr_warn("Bad extended signature table checksum, aborting.\n");
- return -EINVAL;
- }
- }
-
- /*
- * Calculate the checksum of update data and header. The checksum of
- * valid update data and header including the extended signature table
- * must be 0.
- */
- orig_sum = 0;
- i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
- while (i--)
- orig_sum += ((u32 *)mc)[i];
-
- if (orig_sum) {
- if (print_err)
- pr_err("Bad microcode data checksum, aborting.\n");
- return -EINVAL;
- }
-
- if (!ext_table_size)
- return 0;
-
- /*
- * Check extended signature checksum: 0 => valid.
- */
- for (i = 0; i < ext_sigcount; i++) {
- ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
- EXT_SIGNATURE_SIZE * i;
-
- sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
- (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
- if (sum) {
- if (print_err)
- pr_err("Bad extended signature checksum, aborting.\n");
- return -EINVAL;
- }
- }
- return 0;
-}
-
-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-int find_matching_signature(void *mc, unsigned int csig, int cpf)
-{
- struct microcode_header_intel *mc_hdr = mc;
- struct extended_sigtable *ext_hdr;
- struct extended_signature *ext_sig;
- int i;
-
- if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
- return 1;
-
- /* Look for ext. headers: */
- if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
- return 0;
-
- ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
- ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
-
- for (i = 0; i < ext_hdr->count; i++) {
- if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
- return 1;
- ext_sig++;
- }
- return 0;
-}
-
-/*
- * Returns 1 if update has been found, 0 otherwise.
- */
-int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
-{
- struct microcode_header_intel *mc_hdr = mc;
-
- if (mc_hdr->rev <= new_rev)
- return 0;
-
- return find_matching_signature(mc, csig, cpf);
-}
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 1db8dc490b66..d9794060fe22 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -17,11 +17,20 @@ struct cpuid_bit {
u32 sub_leaf;
};
-enum cpuid_regs {
- CR_EAX = 0,
- CR_ECX,
- CR_EDX,
- CR_EBX
+/* Please keep the table sorted by cpuid_bit.level for faster search. */
+static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
+ { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
+ { X86_FEATURE_INTEL_PT, CPUID_EBX, 25, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4VNNIW, CPUID_EDX, 2, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4FMAPS, CPUID_EDX, 3, 0x00000007, 0 },
+ { X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
+ { X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
+ { X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
+ { X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+ { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { 0, 0, 0, 0, 0 }
};
void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
@@ -30,18 +39,6 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
u32 regs[4];
const struct cpuid_bit *cb;
- static const struct cpuid_bit cpuid_bits[] = {
- { X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4VNNIW, CR_EDX, 2, 0x00000007, 0 },
- { X86_FEATURE_AVX512_4FMAPS, CR_EDX, 3, 0x00000007, 0 },
- { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
- { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
- { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
- { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
- { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 },
- { 0, 0, 0, 0, 0 }
- };
-
for (cb = cpuid_bits; cb->feature; cb++) {
/* Verify that the level is valid */
@@ -50,10 +47,35 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
max_level > (cb->level | 0xffff))
continue;
- cpuid_count(cb->level, cb->sub_leaf, &regs[CR_EAX],
- &regs[CR_EBX], &regs[CR_ECX], &regs[CR_EDX]);
+ cpuid_count(cb->level, cb->sub_leaf, &regs[CPUID_EAX],
+ &regs[CPUID_EBX], &regs[CPUID_ECX],
+ &regs[CPUID_EDX]);
if (regs[cb->reg] & (1 << cb->bit))
set_cpu_cap(c, cb->feature);
}
}
+
+u32 get_scattered_cpuid_leaf(unsigned int level, unsigned int sub_leaf,
+ enum cpuid_regs_idx reg)
+{
+ const struct cpuid_bit *cb;
+ u32 cpuid_val = 0;
+
+ for (cb = cpuid_bits; cb->feature; cb++) {
+
+ if (level > cb->level)
+ continue;
+
+ if (level < cb->level)
+ break;
+
+ if (reg == cb->reg && sub_leaf == cb->sub_leaf) {
+ if (cpu_has(&boot_cpu_data, cb->feature))
+ cpuid_val |= BIT(cb->bit);
+ }
+ }
+
+ return cpuid_val;
+}
+EXPORT_SYMBOL_GPL(get_scattered_cpuid_leaf);
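A hypothetical caller of the helper added above might reconstruct the scattered bits of a leaf like this; the snippet only illustrates the interface (bit positions come from the cpuid_bits table) and is not code from this patch:

	/* Rebuild the EDX word of CPUID leaf 0x80000007 as far as the kernel
	 * tracks it via scattered features (HW_PSTATE, CPB, PROC_FEEDBACK): */
	u32 edx = get_scattered_cpuid_leaf(0x80000007, 0, CPUID_EDX);

	if (edx & BIT(9))	/* X86_FEATURE_CPB sits at bit 9 of that leaf */
		pr_info("core performance boost is supported\n");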
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 5130985b758b..6703ab3c0b0b 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -24,11 +24,16 @@
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/clocksource.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
#include <asm/timer.h>
#include <asm/apic.h>
+#include <asm/timer.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) "vmware: " fmt
#define CPUID_VMWARE_INFO_LEAF 0x40000000
#define VMWARE_HYPERVISOR_MAGIC 0x564D5868
@@ -48,6 +53,8 @@
"2"(VMWARE_HYPERVISOR_PORT), "3"(UINT_MAX) : \
"memory");
+static unsigned long vmware_tsc_khz __ro_after_init;
+
static inline int __vmware_platform(void)
{
uint32_t eax, ebx, ecx, edx;
@@ -57,35 +64,80 @@ static inline int __vmware_platform(void)
static unsigned long vmware_get_tsc_khz(void)
{
- uint64_t tsc_hz, lpj;
- uint32_t eax, ebx, ecx, edx;
+ return vmware_tsc_khz;
+}
- VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
+#ifdef CONFIG_PARAVIRT
+static struct cyc2ns_data vmware_cyc2ns __ro_after_init;
+static int vmw_sched_clock __initdata = 1;
- tsc_hz = eax | (((uint64_t)ebx) << 32);
- do_div(tsc_hz, 1000);
- BUG_ON(tsc_hz >> 32);
- pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
- (unsigned long) tsc_hz / 1000,
- (unsigned long) tsc_hz % 1000);
-
- if (!preset_lpj) {
- lpj = ((u64)tsc_hz * 1000);
- do_div(lpj, HZ);
- preset_lpj = lpj;
- }
+static __init int setup_vmw_sched_clock(char *s)
+{
+ vmw_sched_clock = 0;
+ return 0;
+}
+early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
+
+static unsigned long long vmware_sched_clock(void)
+{
+ unsigned long long ns;
- return tsc_hz;
+ ns = mul_u64_u32_shr(rdtsc(), vmware_cyc2ns.cyc2ns_mul,
+ vmware_cyc2ns.cyc2ns_shift);
+ ns -= vmware_cyc2ns.cyc2ns_offset;
+ return ns;
}
+static void __init vmware_sched_clock_setup(void)
+{
+ struct cyc2ns_data *d = &vmware_cyc2ns;
+ unsigned long long tsc_now = rdtsc();
+
+ clocks_calc_mult_shift(&d->cyc2ns_mul, &d->cyc2ns_shift,
+ vmware_tsc_khz, NSEC_PER_MSEC, 0);
+ d->cyc2ns_offset = mul_u64_u32_shr(tsc_now, d->cyc2ns_mul,
+ d->cyc2ns_shift);
+
+ pv_time_ops.sched_clock = vmware_sched_clock;
+ pr_info("using sched offset of %llu ns\n", d->cyc2ns_offset);
+}
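For reference, the mult/shift pair set up above converts TSC cycles to nanoseconds as ns = (cycles * cyc2ns_mul) >> cyc2ns_shift; the numbers below are made up purely for illustration:

	/*
	 * With a hypothetical 3 GHz TSC (vmware_tsc_khz == 3000000),
	 * clocks_calc_mult_shift() picks mul/shift so that
	 *
	 *	ns ~= cycles * 1000000 / 3000000 = cycles / 3
	 *
	 * e.g. roughly mul = 0x55555555 with shift = 32 (1/3 in 0.32 fixed
	 * point).  vmware_sched_clock() then subtracts cyc2ns_offset, captured
	 * at setup time, so sched_clock() starts near zero:
	 *
	 *	sched_clock() = ((rdtsc() * mul) >> shift) - cyc2ns_offset
	 */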
+
+static void __init vmware_paravirt_ops_setup(void)
+{
+ pv_info.name = "VMware hypervisor";
+ pv_cpu_ops.io_delay = paravirt_nop;
+
+ if (vmware_tsc_khz && vmw_sched_clock)
+ vmware_sched_clock_setup();
+}
+#else
+#define vmware_paravirt_ops_setup() do {} while (0)
+#endif
+
static void __init vmware_platform_setup(void)
{
uint32_t eax, ebx, ecx, edx;
+ uint64_t lpj, tsc_khz;
VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
if (ebx != UINT_MAX) {
+ lpj = tsc_khz = eax | (((uint64_t)ebx) << 32);
+ do_div(tsc_khz, 1000);
+ WARN_ON(tsc_khz >> 32);
+ pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
+ (unsigned long) tsc_khz / 1000,
+ (unsigned long) tsc_khz % 1000);
+
+ if (!preset_lpj) {
+ do_div(lpj, HZ);
+ preset_lpj = lpj;
+ }
+
+ vmware_tsc_khz = tsc_khz;
x86_platform.calibrate_tsc = vmware_get_tsc_khz;
+ x86_platform.calibrate_cpu = vmware_get_tsc_khz;
+
#ifdef CONFIG_X86_LOCAL_APIC
/* Skip lapic calibration since we know the bus frequency. */
lapic_timer_frequency = ecx / HZ;
@@ -99,6 +151,8 @@ static void __init vmware_platform_setup(void)
#ifdef CONFIG_X86_IO_APIC
no_timer_check = 1;
#endif
+
+ vmware_paravirt_ops_setup();
}
/*
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 2836de390f95..0931a105ffe1 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -45,10 +45,7 @@
#include <asm/msr.h>
static struct class *cpuid_class;
-
-struct cpuid_regs {
- u32 eax, ebx, ecx, edx;
-};
+static enum cpuhp_state cpuhp_cpuid_state;
static void cpuid_smp_cpuid(void *cmd_block)
{
@@ -115,7 +112,7 @@ static const struct file_operations cpuid_fops = {
.open = cpuid_open,
};
-static int cpuid_device_create(int cpu)
+static int cpuid_device_create(unsigned int cpu)
{
struct device *dev;
@@ -124,35 +121,12 @@ static int cpuid_device_create(int cpu)
return PTR_ERR_OR_ZERO(dev);
}
-static void cpuid_device_destroy(int cpu)
+static int cpuid_device_destroy(unsigned int cpu)
{
device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+ return 0;
}
-static int cpuid_class_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int err = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- err = cpuid_device_create(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- cpuid_device_destroy(cpu);
- break;
- }
- return notifier_from_errno(err);
-}
-
-static struct notifier_block cpuid_class_cpu_notifier =
-{
- .notifier_call = cpuid_class_cpu_callback,
-};
-
static char *cpuid_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
@@ -160,15 +134,13 @@ static char *cpuid_devnode(struct device *dev, umode_t *mode)
static int __init cpuid_init(void)
{
- int i, err = 0;
- i = 0;
+ int err;
if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
"cpu/cpuid", &cpuid_fops)) {
printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
CPUID_MAJOR);
- err = -EBUSY;
- goto out;
+ return -EBUSY;
}
cpuid_class = class_create(THIS_MODULE, "cpuid");
if (IS_ERR(cpuid_class)) {
@@ -177,45 +149,28 @@ static int __init cpuid_init(void)
}
cpuid_class->devnode = cpuid_devnode;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- err = cpuid_device_create(i);
- if (err != 0)
- goto out_class;
- }
- __register_hotcpu_notifier(&cpuid_class_cpu_notifier);
- cpu_notifier_register_done();
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/cpuid:online",
+ cpuid_device_create, cpuid_device_destroy);
+ if (err < 0)
+ goto out_class;
- err = 0;
- goto out;
+ cpuhp_cpuid_state = err;
+ return 0;
out_class:
- i = 0;
- for_each_online_cpu(i) {
- cpuid_device_destroy(i);
- }
- cpu_notifier_register_done();
class_destroy(cpuid_class);
out_chrdev:
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
-out:
return err;
}
+module_init(cpuid_init);
static void __exit cpuid_exit(void)
{
- int cpu = 0;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- cpuid_device_destroy(cpu);
+ cpuhp_remove_state(cpuhp_cpuid_state);
class_destroy(cpuid_class);
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
- __unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
- cpu_notifier_register_done();
}
-
-module_init(cpuid_init);
module_exit(cpuid_exit);
MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
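Both cpuid.c above and msr.c further down drop the old CPU notifier in favor of the cpuhp state machine. A minimal sketch of that pattern, with illustrative names that are not part of this patch:

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>

static enum cpuhp_state example_hp_state;

static int example_cpu_online(unsigned int cpu)
{
	/* create the per-CPU resource; return 0 or a negative errno */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* tear the per-CPU resource down */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/example:online",
				example_cpu_online, example_cpu_offline);
	if (ret < 0)
		return ret;
	/* CPUHP_AP_ONLINE_DYN returns the dynamically allocated state */
	example_hp_state = ret;
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state(example_hp_state);
}

The setup call runs the online callback on every CPU that is already up and arranges for both callbacks to run on future hotplug events, which is why the per-CPU loops and notifier boilerplate can simply be deleted.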
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 85f854b98a9d..0cfd01d2754c 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -22,7 +22,6 @@
int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
unsigned int code_bytes = 64;
-int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;
bool in_task_stack(unsigned long *stack, struct task_struct *task,
@@ -46,14 +45,7 @@ static void printk_stack_address(unsigned long address, int reliable,
char *log_lvl)
{
touch_nmi_watchdog();
- printk("%s [<%p>] %s%pB\n",
- log_lvl, (void *)address, reliable ? "" : "? ",
- (void *)address);
-}
-
-void printk_address(unsigned long address)
-{
- pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
+ printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
@@ -67,6 +59,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
printk("%sCall Trace:\n", log_lvl);
unwind_start(&state, task, regs, stack);
+ stack = stack ? : get_stack_pointer(task, regs);
/*
* Iterate through the stacks, starting with the current stack pointer.
@@ -82,8 +75,8 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* - softirq stack
* - hardirq stack
*/
- for (; stack; stack = stack_info.next_sp) {
- const char *str_begin, *str_end;
+ for (regs = NULL; stack; stack = stack_info.next_sp) {
+ const char *stack_name;
/*
* If we overflowed the task stack into a guard page, jump back
@@ -95,9 +88,9 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (get_stack_info(stack, task, &stack_info, &visit_mask))
break;
- stack_type_str(stack_info.type, &str_begin, &str_end);
- if (str_begin)
- printk("%s <%s> ", log_lvl, str_begin);
+ stack_name = stack_type_name(stack_info.type);
+ if (stack_name)
+ printk("%s <%s>\n", log_lvl, stack_name);
/*
* Scan the stack, printing any text addresses we find. At the
@@ -119,6 +112,15 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (!__kernel_text_address(addr))
continue;
+ /*
+ * Don't print regs->ip again if it was already printed
+ * by __show_regs() below.
+ */
+ if (regs && stack == &regs->ip) {
+ unwind_next_frame(&state);
+ continue;
+ }
+
if (stack == ret_addr_p)
reliable = 1;
@@ -146,10 +148,15 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* of the addresses will just be printed as unreliable.
*/
unwind_next_frame(&state);
+
+ /* if the frame has entry regs, print them */
+ regs = unwind_get_entry_regs(&state);
+ if (regs)
+ __show_regs(regs, 0);
}
- if (str_end)
- printk("%s <%s> ", log_lvl, str_end);
+ if (stack_name)
+ printk("%s </%s>\n", log_lvl, stack_name);
}
}
@@ -164,12 +171,12 @@ void show_stack(struct task_struct *task, unsigned long *sp)
if (!sp && task == current)
sp = get_stack_pointer(current, NULL);
- show_stack_log_lvl(task, NULL, sp, "");
+ show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
}
void show_stack_regs(struct pt_regs *regs)
{
- show_stack_log_lvl(current, regs, NULL, "");
+ show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -261,14 +268,11 @@ int __die(const char *str, struct pt_regs *regs, long err)
sp = kernel_stack_pointer(regs);
savesegment(ss, ss);
}
- printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
- print_symbol("%s", regs->ip);
- printk(" SS:ESP %04x:%08lx\n", ss, sp);
+ printk(KERN_EMERG "EIP: %pS SS:ESP: %04x:%08lx\n",
+ (void *)regs->ip, ss, sp);
#else
/* Executive summary in case the oops scrolled away */
- printk(KERN_ALERT "RIP ");
- printk_address(regs->ip);
- printk(" RSP <%016lx>\n", regs->sp);
+ printk(KERN_ALERT "RIP: %pS RSP: %016lx\n", (void *)regs->ip, regs->sp);
#endif
return 0;
}
@@ -291,22 +295,6 @@ void die(const char *str, struct pt_regs *regs, long err)
oops_end(flags, regs, sig);
}
-static int __init kstack_setup(char *s)
-{
- ssize_t ret;
- unsigned long val;
-
- if (!s)
- return -EINVAL;
-
- ret = kstrtoul(s, 0, &val);
- if (ret)
- return ret;
- kstack_depth_to_print = val;
- return 0;
-}
-early_param("kstack", kstack_setup);
-
static int __init code_bytes_setup(char *s)
{
ssize_t ret;
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 06eb322b5f9f..bb3b5b9a6899 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -16,18 +16,15 @@
#include <asm/stacktrace.h>
-void stack_type_str(enum stack_type type, const char **begin, const char **end)
+const char *stack_type_name(enum stack_type type)
{
- switch (type) {
- case STACK_TYPE_IRQ:
- case STACK_TYPE_SOFTIRQ:
- *begin = "IRQ";
- *end = "EOI";
- break;
- default:
- *begin = NULL;
- *end = NULL;
- }
+ if (type == STACK_TYPE_IRQ)
+ return "IRQ";
+
+ if (type == STACK_TYPE_SOFTIRQ)
+ return "SOFTIRQ";
+
+ return NULL;
}
static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)
@@ -109,8 +106,10 @@ recursion_check:
* just break out and report an unknown stack type.
*/
if (visit_mask) {
- if (*visit_mask & (1UL << info->type))
+ if (*visit_mask & (1UL << info->type)) {
+ printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
goto unknown;
+ }
*visit_mask |= 1UL << info->type;
}
@@ -121,36 +120,6 @@ unknown:
return -EINVAL;
}
-void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *sp, char *log_lvl)
-{
- unsigned long *stack;
- int i;
-
- if (!try_get_task_stack(task))
- return;
-
- sp = sp ? : get_stack_pointer(task, regs);
-
- stack = sp;
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (kstack_end(stack))
- break;
- if ((i % STACKSLOTS_PER_LINE) == 0) {
- if (i != 0)
- pr_cont("\n");
- printk("%s %08lx", log_lvl, *stack++);
- } else
- pr_cont(" %08lx", *stack++);
- touch_nmi_watchdog();
- }
- pr_cont("\n");
- show_trace_log_lvl(task, regs, sp, log_lvl);
-
- put_task_stack(task);
-}
-
-
void show_regs(struct pt_regs *regs)
{
int i;
@@ -168,8 +137,7 @@ void show_regs(struct pt_regs *regs)
unsigned char c;
u8 *ip;
- pr_emerg("Stack:\n");
- show_stack_log_lvl(current, regs, NULL, KERN_EMERG);
+ show_trace_log_lvl(current, regs, NULL, KERN_EMERG);
pr_emerg("Code:");
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 36cf1a498227..fac189efcc34 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -28,23 +28,17 @@ static unsigned long exception_stack_sizes[N_EXCEPTION_STACKS] = {
[DEBUG_STACK - 1] = DEBUG_STKSZ
};
-void stack_type_str(enum stack_type type, const char **begin, const char **end)
+const char *stack_type_name(enum stack_type type)
{
BUILD_BUG_ON(N_EXCEPTION_STACKS != 4);
- switch (type) {
- case STACK_TYPE_IRQ:
- *begin = "IRQ";
- *end = "EOI";
- break;
- case STACK_TYPE_EXCEPTION ... STACK_TYPE_EXCEPTION_LAST:
- *begin = exception_stack_names[type - STACK_TYPE_EXCEPTION];
- *end = "EOE";
- break;
- default:
- *begin = NULL;
- *end = NULL;
- }
+ if (type == STACK_TYPE_IRQ)
+ return "IRQ";
+
+ if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
+ return exception_stack_names[type - STACK_TYPE_EXCEPTION];
+
+ return NULL;
}
static bool in_exception_stack(unsigned long *stack, struct stack_info *info)
@@ -128,8 +122,10 @@ recursion_check:
* just break out and report an unknown stack type.
*/
if (visit_mask) {
- if (*visit_mask & (1UL << info->type))
+ if (*visit_mask & (1UL << info->type)) {
+ printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
goto unknown;
+ }
*visit_mask |= 1UL << info->type;
}
@@ -140,56 +136,6 @@ unknown:
return -EINVAL;
}
-void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
- unsigned long *sp, char *log_lvl)
-{
- unsigned long *irq_stack_end;
- unsigned long *irq_stack;
- unsigned long *stack;
- int i;
-
- if (!try_get_task_stack(task))
- return;
-
- irq_stack_end = (unsigned long *)this_cpu_read(irq_stack_ptr);
- irq_stack = irq_stack_end - (IRQ_STACK_SIZE / sizeof(long));
-
- sp = sp ? : get_stack_pointer(task, regs);
-
- stack = sp;
- for (i = 0; i < kstack_depth_to_print; i++) {
- unsigned long word;
-
- if (stack >= irq_stack && stack <= irq_stack_end) {
- if (stack == irq_stack_end) {
- stack = (unsigned long *) (irq_stack_end[-1]);
- pr_cont(" <EOI> ");
- }
- } else {
- if (kstack_end(stack))
- break;
- }
-
- if (probe_kernel_address(stack, word))
- break;
-
- if ((i % STACKSLOTS_PER_LINE) == 0) {
- if (i != 0)
- pr_cont("\n");
- printk("%s %016lx", log_lvl, word);
- } else
- pr_cont(" %016lx", word);
-
- stack++;
- touch_nmi_watchdog();
- }
-
- pr_cont("\n");
- show_trace_log_lvl(task, regs, sp, log_lvl);
-
- put_task_stack(task);
-}
-
void show_regs(struct pt_regs *regs)
{
int i;
@@ -207,8 +153,7 @@ void show_regs(struct pt_regs *regs)
unsigned char c;
u8 *ip;
- printk(KERN_DEFAULT "Stack:\n");
- show_stack_log_lvl(current, regs, NULL, KERN_DEFAULT);
+ show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
printk(KERN_DEFAULT "Code: ");
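With stack_type_str() replaced by stack_type_name(), the caller in dumpstack.c now brackets each nested stack with matching open/close markers instead of the old begin/end string pairs. Condensed from the hunks above (not a verbatim copy):

	stack_name = stack_type_name(stack_info.type);
	if (stack_name)
		printk("%s <%s>\n", log_lvl, stack_name);

	/* ... print the addresses found on this stack ... */

	if (stack_name)
		printk("%s </%s>\n", log_lvl, stack_name);

so an IRQ stack, for example, shows up in an oops as a "<IRQ>" ... "</IRQ>" block.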
diff --git a/arch/x86/kernel/fpu/bugs.c b/arch/x86/kernel/fpu/bugs.c
index aad34aafc0e0..d913047f832c 100644
--- a/arch/x86/kernel/fpu/bugs.c
+++ b/arch/x86/kernel/fpu/bugs.c
@@ -23,17 +23,12 @@ static double __initdata y = 3145727.0;
*/
void __init fpu__init_check_bugs(void)
{
- u32 cr0_saved;
s32 fdiv_bug;
/* kernel_fpu_begin/end() relies on patched alternative instructions. */
if (!boot_cpu_has(X86_FEATURE_FPU))
return;
- /* We might have CR0::TS set already, clear it: */
- cr0_saved = read_cr0();
- write_cr0(cr0_saved & ~X86_CR0_TS);
-
kernel_fpu_begin();
/*
@@ -56,8 +51,6 @@ void __init fpu__init_check_bugs(void)
kernel_fpu_end();
- write_cr0(cr0_saved);
-
if (fdiv_bug) {
set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
pr_warn("Hmm, FPU with FDIV bug\n");
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index ebb4e95fbd74..e4e97a5355ce 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -58,27 +58,9 @@ static bool kernel_fpu_disabled(void)
return this_cpu_read(in_kernel_fpu);
}
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
static bool interrupted_kernel_fpu_idle(void)
{
- if (kernel_fpu_disabled())
- return false;
-
- if (use_eager_fpu())
- return true;
-
- return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+ return !kernel_fpu_disabled();
}
/*
@@ -125,8 +107,7 @@ void __kernel_fpu_begin(void)
*/
copy_fpregs_to_fpstate(fpu);
} else {
- this_cpu_write(fpu_fpregs_owner_ctx, NULL);
- __fpregs_activate_hw();
+ __cpu_invalidate_fpregs_state();
}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -137,8 +118,6 @@ void __kernel_fpu_end(void)
if (fpu->fpregs_active)
copy_kernel_to_fpregs(&fpu->state);
- else
- __fpregs_deactivate_hw();
kernel_fpu_enable();
}
@@ -159,35 +138,6 @@ void kernel_fpu_end(void)
EXPORT_SYMBOL_GPL(kernel_fpu_end);
/*
- * CR0::TS save/restore functions:
- */
-int irq_ts_save(void)
-{
- /*
- * If in process context and not atomic, we can take a spurious DNA fault.
- * Otherwise, doing clts() in process context requires disabling preemption
- * or some heavy lifting like kernel_fpu_begin()
- */
- if (!in_atomic())
- return 0;
-
- if (read_cr0() & X86_CR0_TS) {
- clts();
- return 1;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(irq_ts_save);
-
-void irq_ts_restore(int TS_state)
-{
- if (TS_state)
- stts();
-}
-EXPORT_SYMBOL_GPL(irq_ts_restore);
-
-/*
* Save the FPU state (mark it for reload if necessary):
*
* This only ever gets called for the current task.
@@ -200,10 +150,7 @@ void fpu__save(struct fpu *fpu)
trace_x86_fpu_before_save(fpu);
if (fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(fpu)) {
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&fpu->state);
- else
- fpregs_deactivate(fpu);
+ copy_kernel_to_fpregs(&fpu->state);
}
}
trace_x86_fpu_after_save(fpu);
@@ -247,7 +194,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
@@ -260,8 +206,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
* Don't let 'init optimized' areas of the XSAVE area
* leak into the child task:
*/
- if (use_eager_fpu())
- memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
+ memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
/*
* Save current FPU registers directly into the child
@@ -283,10 +228,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
memcpy(&src_fpu->state, &dst_fpu->state,
fpu_kernel_xstate_size);
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&src_fpu->state);
- else
- fpregs_deactivate(src_fpu);
+ copy_kernel_to_fpregs(&src_fpu->state);
}
preempt_enable();
@@ -366,7 +308,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
if (fpu->fpstate_active) {
/* Invalidate any lazy state: */
- fpu->last_cpu = -1;
+ __fpu_invalidate_fpregs_state(fpu);
} else {
fpstate_init(&fpu->state);
trace_x86_fpu_init_state(fpu);
@@ -409,7 +351,7 @@ void fpu__current_fpstate_write_begin(void)
* ensures we will not be lazy and skip a XRSTOR in the
* future.
*/
- fpu->last_cpu = -1;
+ __fpu_invalidate_fpregs_state(fpu);
}
/*
@@ -459,7 +401,6 @@ void fpu__restore(struct fpu *fpu)
trace_x86_fpu_before_restore(fpu);
fpregs_activate(fpu);
copy_kernel_to_fpregs(&fpu->state);
- fpu->counter++;
trace_x86_fpu_after_restore(fpu);
kernel_fpu_enable();
}
@@ -477,7 +418,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
void fpu__drop(struct fpu *fpu)
{
preempt_disable();
- fpu->counter = 0;
if (fpu->fpregs_active) {
/* Ignore delayed exceptions from user space */
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 2f2b8c7ccb85..60dece392b3a 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -10,18 +10,6 @@
#include <linux/init.h>
/*
- * Initialize the TS bit in CR0 according to the style of context-switches
- * we are using:
- */
-static void fpu__init_cpu_ctx_switch(void)
-{
- if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
- stts();
- else
- clts();
-}
-
-/*
* Initialize the registers found in all CPUs, CR0 and CR4:
*/
static void fpu__init_cpu_generic(void)
@@ -58,7 +46,6 @@ void fpu__init_cpu(void)
{
fpu__init_cpu_generic();
fpu__init_cpu_xstate();
- fpu__init_cpu_ctx_switch();
}
/*
@@ -233,82 +220,16 @@ static void __init fpu__init_system_xstate_size_legacy(void)
}
/*
- * FPU context switching strategies:
- *
- * Against popular belief, we don't do lazy FPU saves, due to the
- * task migration complications it brings on SMP - we only do
- * lazy FPU restores.
- *
- * 'lazy' is the traditional strategy, which is based on setting
- * CR0::TS to 1 during context-switch (instead of doing a full
- * restore of the FPU state), which causes the first FPU instruction
- * after the context switch (whenever it is executed) to fault - at
- * which point we lazily restore the FPU state into FPU registers.
- *
- * Tasks are of course under no obligation to execute FPU instructions,
- * so it can easily happen that another context-switch occurs without
- * a single FPU instruction being executed. If we eventually switch
- * back to the original task (that still owns the FPU) then we have
- * not only saved the restores along the way, but we also have the
- * FPU ready to be used for the original task.
- *
- * 'lazy' is deprecated because it's almost never a performance win
- * and it's much more complicated than 'eager'.
- *
- * 'eager' switching is by default on all CPUs, there we switch the FPU
- * state during every context switch, regardless of whether the task
- * has used FPU instructions in that time slice or not. This is done
- * because modern FPU context saving instructions are able to optimize
- * state saving and restoration in hardware: they can detect both
- * unused and untouched FPU state and optimize accordingly.
- *
- * [ Note that even in 'lazy' mode we might optimize context switches
- * to use 'eager' restores, if we detect that a task is using the FPU
- * frequently. See the fpu->counter logic in fpu/internal.h for that. ]
- */
-static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
-
-/*
* Find supported xfeatures based on cpu features and command-line input.
* This must be called after fpu__init_parse_early_param() is called and
* xfeatures_mask is enumerated.
*/
u64 __init fpu__get_supported_xfeatures_mask(void)
{
- /* Support all xfeatures known to us */
- if (eagerfpu != DISABLE)
- return XCNTXT_MASK;
-
- /* Warning of xfeatures being disabled for no eagerfpu mode */
- if (xfeatures_mask & XFEATURE_MASK_EAGER) {
- pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
- xfeatures_mask & XFEATURE_MASK_EAGER);
- }
-
- /* Return a mask that masks out all features requiring eagerfpu mode */
- return ~XFEATURE_MASK_EAGER;
+ return XCNTXT_MASK;
}
-/*
- * Disable features dependent on eagerfpu.
- */
-static void __init fpu__clear_eager_fpu_features(void)
-{
- setup_clear_cpu_cap(X86_FEATURE_MPX);
-}
-
-/*
- * Pick the FPU context switching strategy:
- *
- * When eagerfpu is AUTO or ENABLE, we ensure it is ENABLE if either of
- * the following is true:
- *
- * (1) the cpu has xsaveopt, as it has the optimization and doing eager
- * FPU switching has a relatively low cost compared to a plain xsave;
- * (2) the cpu has xsave features (e.g. MPX) that depend on eager FPU
- * switching. Should the kernel boot with noxsaveopt, we support MPX
- * with eager FPU switching at a higher cost.
- */
+/* Legacy code to initialize eager fpu mode. */
static void __init fpu__init_system_ctx_switch(void)
{
static bool on_boot_cpu __initdata = 1;
@@ -317,17 +238,6 @@ static void __init fpu__init_system_ctx_switch(void)
on_boot_cpu = 0;
WARN_ON_FPU(current->thread.fpu.fpstate_active);
-
- if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
- eagerfpu = ENABLE;
-
- if (xfeatures_mask & XFEATURE_MASK_EAGER)
- eagerfpu = ENABLE;
-
- if (eagerfpu == ENABLE)
- setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
-
- printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
}
/*
@@ -336,11 +246,6 @@ static void __init fpu__init_system_ctx_switch(void)
*/
static void __init fpu__init_parse_early_param(void)
{
- if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
- eagerfpu = DISABLE;
- fpu__clear_eager_fpu_features();
- }
-
if (cmdline_find_option_bool(boot_command_line, "no387"))
setup_clear_cpu_cap(X86_FEATURE_FPU);
@@ -375,14 +280,6 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
*/
fpu__init_cpu();
- /*
- * But don't leave CR0::TS set yet, as some of the FPU setup
- * methods depend on being able to execute FPU instructions
- * that will fault on a set TS, such as the FXSAVE in
- * fpu__init_system_mxcsr().
- */
- clts();
-
fpu__init_system_generic();
fpu__init_system_xstate_size_legacy();
fpu__init_system_xstate();
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index a184c210efba..83c23c230b4c 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -340,11 +340,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
}
fpu->fpstate_active = 1;
- if (use_eager_fpu()) {
- preempt_disable();
- fpu__restore(fpu);
- preempt_enable();
- }
+ preempt_disable();
+ fpu__restore(fpu);
+ preempt_enable();
return err;
} else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 095ef7ddd6ae..1d7770447b3e 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -65,6 +65,7 @@ void fpu__xstate_clear_all_cpu_caps(void)
setup_clear_cpu_cap(X86_FEATURE_AVX);
setup_clear_cpu_cap(X86_FEATURE_AVX2);
setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512IFMA);
setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
@@ -73,6 +74,7 @@ void fpu__xstate_clear_all_cpu_caps(void)
setup_clear_cpu_cap(X86_FEATURE_AVX512VL);
setup_clear_cpu_cap(X86_FEATURE_MPX);
setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512VBMI);
setup_clear_cpu_cap(X86_FEATURE_PKU);
setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
@@ -890,15 +892,6 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
*/
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return -EINVAL;
- /*
- * For most XSAVE components, this would be an arduous task:
- * brining fpstate up to date with fpregs, updating fpstate,
- * then re-populating fpregs. But, for components that are
- * never lazily managed, we can just access the fpregs
- * directly. PKRU is never managed lazily, so we can just
- * manipulate it directly. Make sure it stays that way.
- */
- WARN_ON_ONCE(!use_eager_fpu());
/* Set the bits we need in PKRU: */
if (init_val & PKEY_DISABLE_ACCESS)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 2dabea46f039..4e8577d03372 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -63,6 +63,8 @@
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif
+#define SIZEOF_PTREGS 17*4
+
/*
* Number of possible pages in the lowmem region.
*
@@ -248,19 +250,19 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
#ifdef CONFIG_PARAVIRT
 /* This can only trip for a broken bootloader... */
cmpw $0x207, pa(boot_params + BP_version)
- jb default_entry
+ jb .Ldefault_entry
/* Paravirt-compatible boot parameters. Look to see what architecture
we're booting under. */
movl pa(boot_params + BP_hardware_subarch), %eax
cmpl $num_subarch_entries, %eax
- jae bad_subarch
+ jae .Lbad_subarch
movl pa(subarch_entries)(,%eax,4), %eax
subl $__PAGE_OFFSET, %eax
jmp *%eax
-bad_subarch:
+.Lbad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
/* Unknown implementation; there's really
@@ -270,14 +272,14 @@ WEAK(xen_entry)
__INITDATA
subarch_entries:
- .long default_entry /* normal x86/PC */
+ .long .Ldefault_entry /* normal x86/PC */
.long lguest_entry /* lguest hypervisor */
.long xen_entry /* Xen hypervisor */
- .long default_entry /* Moorestown MID */
+ .long .Ldefault_entry /* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
- jmp default_entry
+ jmp .Ldefault_entry
#endif /* CONFIG_PARAVIRT */
#ifdef CONFIG_HOTPLUG_CPU
@@ -289,7 +291,8 @@ num_subarch_entries = (. - subarch_entries) / 4
ENTRY(start_cpu0)
movl initial_stack, %ecx
movl %ecx, %esp
- jmp *(initial_code)
+ call *(initial_code)
+1: jmp 1b
ENDPROC(start_cpu0)
#endif
@@ -317,7 +320,7 @@ ENTRY(startup_32_smp)
call load_ucode_ap
#endif
-default_entry:
+.Ldefault_entry:
#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
X86_CR0_PG)
@@ -347,7 +350,7 @@ default_entry:
pushfl
popl %eax # get EFLAGS
 testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remain set?
- jz enable_paging # hw disallowed setting of ID bit
+ jz .Lenable_paging # hw disallowed setting of ID bit
# which means no CPUID and no CR4
xorl %eax,%eax
@@ -357,13 +360,13 @@ default_entry:
movl $1,%eax
cpuid
andl $~1,%edx # Ignore CPUID.FPU
- jz enable_paging # No flags or only CPUID.FPU = no CR4
+ jz .Lenable_paging # No flags or only CPUID.FPU = no CR4
movl pa(mmu_cr4_features),%eax
movl %eax,%cr4
testb $X86_CR4_PAE, %al # check if PAE is enabled
- jz enable_paging
+ jz .Lenable_paging
/* Check if extended functions are implemented */
movl $0x80000000, %eax
@@ -371,7 +374,7 @@ default_entry:
/* Value must be in the range 0x80000001 to 0x8000ffff */
subl $0x80000001, %eax
cmpl $(0x8000ffff-0x80000001), %eax
- ja enable_paging
+ ja .Lenable_paging
/* Clear bogus XD_DISABLE bits */
call verify_cpu
@@ -380,7 +383,7 @@ default_entry:
cpuid
/* Execute Disable bit supported? */
btl $(X86_FEATURE_NX & 31), %edx
- jnc enable_paging
+ jnc .Lenable_paging
/* Setup EFER (Extended Feature Enable Register) */
movl $MSR_EFER, %ecx
@@ -390,7 +393,7 @@ default_entry:
/* Make changes effective */
wrmsr
-enable_paging:
+.Lenable_paging:
/*
* Enable paging
@@ -419,7 +422,7 @@ enable_paging:
*/
movb $4,X86 # at least 486
cmpl $-1,X86_CPUID
- je is486
+ je .Lis486
/* get vendor info */
xorl %eax,%eax # call CPUID with 0 -> return vendor ID
@@ -430,7 +433,7 @@ enable_paging:
movl %ecx,X86_VENDOR_ID+8 # last 4 chars
orl %eax,%eax # do we have processor info as well?
- je is486
+ je .Lis486
movl $1,%eax # Use the CPUID instruction to get CPU type
cpuid
@@ -444,7 +447,7 @@ enable_paging:
movb %cl,X86_MASK
movl %edx,X86_CAPABILITY
-is486:
+.Lis486:
movl $0x50022,%ecx # set AM, WP, NE and MP
movl %cr0,%eax
andl $0x80000011,%eax # Save PG,PE,ET
@@ -470,8 +473,9 @@ is486:
xorl %eax,%eax # Clear LDT
lldt %ax
- pushl $0 # fake return address for unwinder
- jmp *(initial_code)
+ call *(initial_code)
+1: jmp 1b
+ENDPROC(startup_32_smp)
#include "verify_cpu.S"
@@ -709,7 +713,12 @@ ENTRY(initial_page_table)
.data
.balign 4
ENTRY(initial_stack)
- .long init_thread_union+THREAD_SIZE
+ /*
+ * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
+ * unwinder reliably detect the end of the stack.
+ */
+ .long init_thread_union + THREAD_SIZE - SIZEOF_PTREGS - \
+ TOP_OF_KERNEL_STACK_PADDING;
__INITRODATA
int_msg:
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index b4421cc191b0..90de28841242 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -66,13 +66,8 @@ startup_64:
* tables and then reload them.
*/
- /*
- * Setup stack for verify_cpu(). "-8" because initial_stack is defined
- * this way, see below. Our best guess is a NULL ptr for stack
- * termination heuristics and we don't want to break anything which
- * might depend on it (kgdb, ...).
- */
- leaq (__end_init_task - 8)(%rip), %rsp
+ /* Set up the stack for verify_cpu(), similar to initial_stack below */
+ leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp
/* Sanitize CPU configuration */
call verify_cpu
@@ -117,20 +112,20 @@ startup_64:
movq %rdi, %rax
shrq $PGDIR_SHIFT, %rax
- leaq (4096 + _KERNPG_TABLE)(%rbx), %rdx
+ leaq (PAGE_SIZE + _KERNPG_TABLE)(%rbx), %rdx
movq %rdx, 0(%rbx,%rax,8)
movq %rdx, 8(%rbx,%rax,8)
- addq $4096, %rdx
+ addq $PAGE_SIZE, %rdx
movq %rdi, %rax
shrq $PUD_SHIFT, %rax
andl $(PTRS_PER_PUD-1), %eax
- movq %rdx, 4096(%rbx,%rax,8)
+ movq %rdx, PAGE_SIZE(%rbx,%rax,8)
incl %eax
andl $(PTRS_PER_PUD-1), %eax
- movq %rdx, 4096(%rbx,%rax,8)
+ movq %rdx, PAGE_SIZE(%rbx,%rax,8)
- addq $8192, %rbx
+ addq $PAGE_SIZE * 2, %rbx
movq %rdi, %rax
shrq $PMD_SHIFT, %rdi
addq $(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
@@ -147,6 +142,9 @@ startup_64:
decl %ecx
jnz 1b
+ test %rbp, %rbp
+ jz .Lskip_fixup
+
/*
* Fixup the kernel text+data virtual addresses. Note that
* we might write invalid pmds, when the kernel is relocated
@@ -154,9 +152,9 @@ startup_64:
* beyond _end.
*/
leaq level2_kernel_pgt(%rip), %rdi
- leaq 4096(%rdi), %r8
+ leaq PAGE_SIZE(%rdi), %r8
/* See if it is a valid page table entry */
-1: testb $1, 0(%rdi)
+1: testb $_PAGE_PRESENT, 0(%rdi)
jz 2f
addq %rbp, 0(%rdi)
/* Go to the next page */
@@ -167,6 +165,7 @@ startup_64:
/* Fixup phys_base */
addq %rbp, phys_base(%rip)
+.Lskip_fixup:
movq $(early_level4_pgt - __START_KERNEL_map), %rax
jmp 1f
ENTRY(secondary_startup_64)
@@ -265,13 +264,17 @@ ENTRY(secondary_startup_64)
movl $MSR_GS_BASE,%ecx
movl initial_gs(%rip),%eax
movl initial_gs+4(%rip),%edx
- wrmsr
+ wrmsr
/* rsi is pointer to real mode structure with interesting info.
pass it to C */
movq %rsi, %rdi
-
- /* Finally jump to run C code and to be on real kernel address
+ jmp start_cpu
+ENDPROC(secondary_startup_64)
+
+ENTRY(start_cpu)
+ /*
+ * Jump to run C code and to be on a real kernel address.
* Since we are running on identity-mapped space we have to jump
* to the full 64bit address, this is only possible as indirect
* jump. In addition we need to ensure %cs is set so we make this
@@ -295,12 +298,13 @@ ENTRY(secondary_startup_64)
* REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
* address given in m16:64.
*/
- movq initial_code(%rip),%rax
- pushq $0 # fake return address to stop unwinder
+ call 1f # put return address on stack for unwinder
+1: xorq %rbp, %rbp # clear frame pointer
+ movq initial_code(%rip), %rax
pushq $__KERNEL_CS # set correct cs
pushq %rax # target address in negative space
lretq
-ENDPROC(secondary_startup_64)
+ENDPROC(start_cpu)
#include "verify_cpu.S"
@@ -308,15 +312,11 @@ ENDPROC(secondary_startup_64)
/*
* Boot CPU0 entry point. It's called from play_dead(). Everything has been set
* up already except stack. We just set up stack here. Then call
- * start_secondary().
+ * start_secondary() via start_cpu().
*/
ENTRY(start_cpu0)
- movq initial_stack(%rip),%rsp
- movq initial_code(%rip),%rax
- pushq $0 # fake return address to stop unwinder
- pushq $__KERNEL_CS # set correct cs
- pushq %rax # target address in negative space
- lretq
+ movq initial_stack(%rip), %rsp
+ jmp start_cpu
ENDPROC(start_cpu0)
#endif
@@ -328,7 +328,11 @@ ENDPROC(start_cpu0)
GLOBAL(initial_gs)
.quad INIT_PER_CPU_VAR(irq_stack_union)
GLOBAL(initial_stack)
- .quad init_thread_union+THREAD_SIZE-8
+ /*
+ * The SIZEOF_PTREGS gap is a convention which helps the in-kernel
+ * unwinder reliably detect the end of the stack.
+ */
+ .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS
__FINITDATA
bad_address:
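Both head_32.S and head_64.S now leave a pt_regs-sized gap at the top of the initial stack so the unwinder sees the same end-of-stack layout as on any other task stack. Restated in C using the 32-bit constants from the hunks above (the 64-bit file uses the analogous SIZEOF_PTREGS for its struct pt_regs):

	/* 32-bit struct pt_regs: 17 slots of 4 bytes each, see the define above */
	#define SIZEOF_PTREGS	(17 * 4)

	unsigned long initial_stack_top =
		(unsigned long)&init_thread_union + THREAD_SIZE
		- SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING;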
diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
new file mode 100644
index 000000000000..cb9c1ed1d391
--- /dev/null
+++ b/arch/x86/kernel/itmt.c
@@ -0,0 +1,215 @@
+/*
+ * itmt.c: Support Intel Turbo Boost Max Technology 3.0
+ *
+ * (C) Copyright 2016 Intel Corporation
+ * Author: Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ * On platforms supporting Intel Turbo Boost Max Technology 3.0 (ITMT),
+ * the maximum turbo frequencies of some cores in a CPU package may be
+ * higher than for the other cores in the same package. In that case,
+ * better performance can be achieved by making the scheduler prefer
+ * to run tasks on the CPUs with higher max turbo frequencies.
+ *
+ * This file provides functions and data structures for enabling the
+ * scheduler to favor scheduling on cores that can be boosted to a higher
+ * frequency under ITMT.
+ */
+
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/cpuset.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/sysctl.h>
+#include <linux/nodemask.h>
+
+static DEFINE_MUTEX(itmt_update_mutex);
+DEFINE_PER_CPU_READ_MOSTLY(int, sched_core_priority);
+
+/* Boolean to track if system has ITMT capabilities */
+static bool __read_mostly sched_itmt_capable;
+
+/*
+ * Boolean to control whether we want to move processes to a CPU
+ * capable of a higher turbo frequency, on CPUs supporting Intel
+ * Turbo Boost Max Technology 3.0.
+ *
+ * It can be set via /proc/sys/kernel/sched_itmt_enabled
+ */
+unsigned int __read_mostly sysctl_sched_itmt_enabled;
+
+static int sched_itmt_update_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ unsigned int old_sysctl;
+ int ret;
+
+ mutex_lock(&itmt_update_mutex);
+
+ if (!sched_itmt_capable) {
+ mutex_unlock(&itmt_update_mutex);
+ return -EINVAL;
+ }
+
+ old_sysctl = sysctl_sched_itmt_enabled;
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled) {
+ x86_topology_update = true;
+ rebuild_sched_domains();
+ }
+
+ mutex_unlock(&itmt_update_mutex);
+
+ return ret;
+}
+
+static unsigned int zero;
+static unsigned int one = 1;
+static struct ctl_table itmt_kern_table[] = {
+ {
+ .procname = "sched_itmt_enabled",
+ .data = &sysctl_sched_itmt_enabled,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_itmt_update_handler,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {}
+};
+
+static struct ctl_table itmt_root_table[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = itmt_kern_table,
+ },
+ {}
+};
+
+static struct ctl_table_header *itmt_sysctl_header;
+
+/**
+ * sched_set_itmt_support() - Indicate platform supports ITMT
+ *
+ * This function is used by the OS to indicate to the scheduler that
+ * the platform is capable of supporting the ITMT feature.
+ *
+ * The current scheme has the pstate driver detect whether the system
+ * is ITMT capable and call sched_set_itmt_support().
+ *
+ * This must be done only after sched_set_itmt_core_prio() has been
+ * called to set the CPUs' priorities. It must not be called with the
+ * CPU hotplug lock held, as we need to acquire that lock later to
+ * rebuild the sched domains.
+ *
+ * Return: 0 on success
+ */
+int sched_set_itmt_support(void)
+{
+ mutex_lock(&itmt_update_mutex);
+
+ if (sched_itmt_capable) {
+ mutex_unlock(&itmt_update_mutex);
+ return 0;
+ }
+
+ itmt_sysctl_header = register_sysctl_table(itmt_root_table);
+ if (!itmt_sysctl_header) {
+ mutex_unlock(&itmt_update_mutex);
+ return -ENOMEM;
+ }
+
+ sched_itmt_capable = true;
+
+ sysctl_sched_itmt_enabled = 1;
+
+ if (sysctl_sched_itmt_enabled) {
+ x86_topology_update = true;
+ rebuild_sched_domains();
+ }
+
+ mutex_unlock(&itmt_update_mutex);
+
+ return 0;
+}
+
+/**
+ * sched_clear_itmt_support() - Revoke platform's support of ITMT
+ *
+ * This function is used by the OS to indicate that it has
+ * revoked the platform's support of the ITMT feature.
+ *
+ * It must not be called with the CPU hotplug lock held, as we need
+ * to acquire that lock later to rebuild the sched domains.
+ */
+void sched_clear_itmt_support(void)
+{
+ mutex_lock(&itmt_update_mutex);
+
+ if (!sched_itmt_capable) {
+ mutex_unlock(&itmt_update_mutex);
+ return;
+ }
+ sched_itmt_capable = false;
+
+ if (itmt_sysctl_header) {
+ unregister_sysctl_table(itmt_sysctl_header);
+ itmt_sysctl_header = NULL;
+ }
+
+ if (sysctl_sched_itmt_enabled) {
+ /* disable sched_itmt if we are no longer ITMT capable */
+ sysctl_sched_itmt_enabled = 0;
+ x86_topology_update = true;
+ rebuild_sched_domains();
+ }
+
+ mutex_unlock(&itmt_update_mutex);
+}
+
+int arch_asym_cpu_priority(int cpu)
+{
+ return per_cpu(sched_core_priority, cpu);
+}
+
+/**
+ * sched_set_itmt_core_prio() - Set CPU priority based on ITMT
+ * @prio: Priority of cpu core
+ * @core_cpu: The cpu number associated with the core
+ *
+ * The pstate driver will find out the max boost frequency
+ * and call this function to set a priority proportional
+ * to the max boost frequency. A CPU with a higher boost
+ * frequency will receive a higher priority.
+ *
+ * No need to rebuild sched domain after updating
+ * the CPU priorities. The sched domains have no
+ * dependency on CPU priorities.
+ */
+void sched_set_itmt_core_prio(int prio, int core_cpu)
+{
+ int cpu, i = 1;
+
+ for_each_cpu(cpu, topology_sibling_cpumask(core_cpu)) {
+ int smt_prio;
+
+ /*
+ * Ensure that the siblings are moved to the end
+ * of the priority chain and only used when
+ * all other high priority cpus are out of capacity.
+ */
+ smt_prio = prio * smp_num_siblings / i;
+ per_cpu(sched_core_priority, cpu) = smt_prio;
+ i++;
+ }
+}
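The smt_prio calculation above pushes SMT siblings toward the end of the priority order. A quick worked example with illustrative numbers: with smp_num_siblings == 2 and a core priority of 40, the first sibling gets 40 * 2 / 1 = 80 and the second gets 40 * 2 / 2 = 40, so the scheduler only falls back to the second hyperthread of a fast core once the higher-priority CPUs are out of capacity.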
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index edbbfc854e39..482eae10acba 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -267,13 +267,11 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
case KVM_PV_REASON_PAGE_NOT_PRESENT:
/* page is swapped out by the host. */
prev_state = exception_enter();
- exit_idle();
kvm_async_pf_task_wait((u32)read_cr2());
exception_exit(prev_state);
break;
case KVM_PV_REASON_PAGE_READY:
rcu_irq_enter();
- exit_idle();
kvm_async_pf_task_wake((u32)read_cr2());
rcu_irq_exit();
break;
@@ -308,7 +306,7 @@ static void kvm_register_steal_time(void)
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
-static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
+static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
/**
* This relies on __test_and_clear_bit to modify the memory
@@ -319,7 +317,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
*/
if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
return;
- apic_write(APIC_EOI, APIC_EOI_ACK);
+ apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}
static void kvm_guest_cpu_init(void)
@@ -592,6 +590,14 @@ out:
local_irq_restore(flags);
}
+__visible bool __kvm_vcpu_is_preempted(int cpu)
+{
+ struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
+
+ return !!src->preempted;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
+
/*
* Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
*/
@@ -608,6 +614,11 @@ void __init kvm_spinlock_init(void)
pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
pv_lock_ops.wait = kvm_wait;
pv_lock_ops.kick = kvm_kick_cpu;
+
+ if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ pv_lock_ops.vcpu_is_preempted =
+ PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
+ }
}
static __init int kvm_spinlock_init_jump(void)
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 7f3550acde1b..f5e3ff835cc8 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -44,6 +44,7 @@
#include <asm/msr.h>
static struct class *msr_class;
+static enum cpuhp_state cpuhp_msr_state;
static ssize_t msr_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
@@ -180,7 +181,7 @@ static const struct file_operations msr_fops = {
.compat_ioctl = msr_ioctl,
};
-static int msr_device_create(int cpu)
+static int msr_device_create(unsigned int cpu)
{
struct device *dev;
@@ -189,34 +190,12 @@ static int msr_device_create(int cpu)
return PTR_ERR_OR_ZERO(dev);
}
-static void msr_device_destroy(int cpu)
+static int msr_device_destroy(unsigned int cpu)
{
device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ return 0;
}
-static int msr_class_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int err = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- err = msr_device_create(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- msr_device_destroy(cpu);
- break;
- }
- return notifier_from_errno(err);
-}
-
-static struct notifier_block __refdata msr_class_cpu_notifier = {
- .notifier_call = msr_class_cpu_callback,
-};
-
static char *msr_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
@@ -224,13 +203,11 @@ static char *msr_devnode(struct device *dev, umode_t *mode)
static int __init msr_init(void)
{
- int i, err = 0;
- i = 0;
+ int err;
if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
pr_err("unable to get major %d for msr\n", MSR_MAJOR);
- err = -EBUSY;
- goto out;
+ return -EBUSY;
}
msr_class = class_create(THIS_MODULE, "msr");
if (IS_ERR(msr_class)) {
@@ -239,44 +216,28 @@ static int __init msr_init(void)
}
msr_class->devnode = msr_devnode;
- cpu_notifier_register_begin();
- for_each_online_cpu(i) {
- err = msr_device_create(i);
- if (err != 0)
- goto out_class;
- }
- __register_hotcpu_notifier(&msr_class_cpu_notifier);
- cpu_notifier_register_done();
-
- err = 0;
- goto out;
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/msr:online",
+ msr_device_create, msr_device_destroy);
+ if (err < 0)
+ goto out_class;
+ cpuhp_msr_state = err;
+ return 0;
out_class:
- i = 0;
- for_each_online_cpu(i)
- msr_device_destroy(i);
- cpu_notifier_register_done();
+ cpuhp_remove_state(cpuhp_msr_state);
class_destroy(msr_class);
out_chrdev:
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
-out:
return err;
}
+module_init(msr_init);
static void __exit msr_exit(void)
{
- int cpu = 0;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- msr_device_destroy(cpu);
+ cpuhp_remove_state(cpuhp_msr_state);
class_destroy(msr_class);
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
- __unregister_hotcpu_notifier(&msr_class_cpu_notifier);
- cpu_notifier_register_done();
}
-
-module_init(msr_init);
module_exit(msr_exit)
MODULE_AUTHOR("H. Peter Anvin <hpa@zytor.com>");
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 2c55a003b793..6d4bf812af45 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -12,7 +12,6 @@ __visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
native_queued_spin_unlock(lock);
}
-
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
bool pv_is_native_spin_unlock(void)
@@ -21,12 +20,25 @@ bool pv_is_native_spin_unlock(void)
__raw_callee_save___native_queued_spin_unlock;
}
+__visible bool __native_vcpu_is_preempted(int cpu)
+{
+ return false;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
+
+bool pv_is_native_vcpu_is_preempted(void)
+{
+ return pv_lock_ops.vcpu_is_preempted.func ==
+ __raw_callee_save___native_vcpu_is_preempted;
+}
+
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
.wait = paravirt_nop,
.kick = paravirt_nop,
+ .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
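The new vcpu_is_preempted op gives lock waiters a way to stop spinning on a lock whose holder's vCPU has been preempted by the host: the native version always reports false, while the KVM version (in kvm.c above) reads steal_time.preempted. A hedged sketch of a consumer, where vcpu_is_preempted() is assumed to be the generic wrapper around pv_lock_ops.vcpu_is_preempted and owner_cpu/yield_to_owner() are purely illustrative:

	/* Illustrative only: stop burning cycles on a preempted lock holder. */
	if (vcpu_is_preempted(owner_cpu))
		yield_to_owner();	/* hypothetical back-off helper */
	else
		cpu_relax();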
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index bbf3d5933eaa..a1bfba0f7234 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -328,7 +328,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
.cpuid = native_cpuid,
.get_debugreg = native_get_debugreg,
.set_debugreg = native_set_debugreg,
- .clts = native_clts,
.read_cr0 = native_read_cr0,
.write_cr0 = native_write_cr0,
.read_cr4 = native_read_cr4,
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 920c6ae08592..5d2e57163154 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -8,10 +8,10 @@ DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
-DEF_NATIVE(pv_cpu_ops, clts, "clts");
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -27,6 +27,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
}
extern bool pv_is_native_spin_unlock(void);
+extern bool pv_is_native_vcpu_is_preempted(void);
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
unsigned long addr, unsigned len)
@@ -48,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
PATCH_SITE(pv_mmu_ops, read_cr2);
PATCH_SITE(pv_mmu_ops, read_cr3);
PATCH_SITE(pv_mmu_ops, write_cr3);
- PATCH_SITE(pv_cpu_ops, clts);
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
if (pv_is_native_spin_unlock()) {
@@ -56,6 +56,12 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
end = end_pv_lock_ops_queued_spin_unlock;
goto patch_site;
}
+ case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+ if (pv_is_native_vcpu_is_preempted()) {
+ start = start_pv_lock_ops_vcpu_is_preempted;
+ end = end_pv_lock_ops_vcpu_is_preempted;
+ goto patch_site;
+ }
#endif
default:
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index bb3840cedb4f..0274222e7222 100644
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -10,7 +10,6 @@ DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
-DEF_NATIVE(pv_cpu_ops, clts, "clts");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
@@ -21,6 +20,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
#endif
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -36,6 +36,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
}
extern bool pv_is_native_spin_unlock(void);
+extern bool pv_is_native_vcpu_is_preempted(void);
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
unsigned long addr, unsigned len)
@@ -58,7 +59,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
PATCH_SITE(pv_mmu_ops, read_cr2);
PATCH_SITE(pv_mmu_ops, read_cr3);
PATCH_SITE(pv_mmu_ops, write_cr3);
- PATCH_SITE(pv_cpu_ops, clts);
PATCH_SITE(pv_mmu_ops, flush_tlb_single);
PATCH_SITE(pv_cpu_ops, wbinvd);
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
@@ -68,6 +68,12 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
end = end_pv_lock_ops_queued_spin_unlock;
goto patch_site;
}
+ case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+ if (pv_is_native_vcpu_is_preempted()) {
+ start = start_pv_lock_ops_vcpu_is_preempted;
+ end = end_pv_lock_ops_vcpu_is_preempted;
+ goto patch_site;
+ }
#endif
default:
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0888a879120f..7423acf15b04 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -65,23 +65,6 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
-#ifdef CONFIG_X86_64
-static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
- atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-#endif
-
/*
* this gets called so that we can store lazy state into memory and copy the
* current task into the new thread.
@@ -251,39 +234,10 @@ static inline void play_dead(void)
}
#endif
-#ifdef CONFIG_X86_64
-void enter_idle(void)
-{
- this_cpu_write(is_idle, 1);
- atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
- if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
- return;
- atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-/* Called from interrupts to signify idle end */
-void exit_idle(void)
-{
- /* idle loop has pid 0 */
- if (current->pid)
- return;
- __exit_idle();
-}
-#endif
-
void arch_cpu_idle_enter(void)
{
+ tsc_verify_tsc_adjust();
local_touch_nmi();
- enter_idle();
-}
-
-void arch_cpu_idle_exit(void)
-{
- __exit_idle();
}
void arch_cpu_idle_dead(void)
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8efdc4c..becaa012f620 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -54,6 +54,7 @@
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
+#include <asm/intel_rdt.h>
void __show_regs(struct pt_regs *regs, int all)
{
@@ -72,10 +73,9 @@ void __show_regs(struct pt_regs *regs, int all)
savesegment(gs, gs);
}
- printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
- (u16)regs->cs, regs->ip, regs->flags,
- smp_processor_id());
- print_symbol("EIP is at %s\n", regs->ip);
+ printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
+ printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
+ smp_processor_id());
printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
regs->ax, regs->bx, regs->cx, regs->dx);
@@ -232,11 +232,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
- fpu_switch_t fpu_switch;
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
- fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+ switch_fpu_prepare(prev_fpu, cpu);
/*
* Save away %gs. No need to save %fs, as it was saved on the
@@ -295,9 +294,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (prev->gs | next->gs)
lazy_load_gs(next->gs);
- switch_fpu_finish(next_fpu, fpu_switch);
+ switch_fpu_finish(next_fpu, cpu);
this_cpu_write(current_task, next_p);
+ /* Load the Intel cache allocation PQR MSR. */
+ intel_rdt_sched_in();
+
return prev_p;
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b3760b3c1ca0..3a966b94817c 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -50,6 +50,7 @@
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
+#include <asm/intel_rdt.h>
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
@@ -61,10 +62,15 @@ void __show_regs(struct pt_regs *regs, int all)
unsigned int fsindex, gsindex;
unsigned int ds, cs, es;
- printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
- printk_address(regs->ip);
- printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
- regs->sp, regs->flags);
+ printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs & 0xffff,
+ (void *)regs->ip);
+ printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
+ regs->sp, regs->flags);
+ if (regs->orig_ax != -1)
+ pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
+ else
+ pr_cont("\n");
+
printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
regs->ax, regs->bx, regs->cx);
printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
@@ -265,9 +271,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
int cpu = smp_processor_id();
struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
unsigned prev_fsindex, prev_gsindex;
- fpu_switch_t fpu_switch;
- fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
+ switch_fpu_prepare(prev_fpu, cpu);
/* We must save %fs and %gs before load_TLS() because
* %fs and %gs may be cleared by load_TLS().
@@ -417,7 +422,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
prev->gsbase = 0;
prev->gsindex = prev_gsindex;
- switch_fpu_finish(next_fpu, fpu_switch);
+ switch_fpu_finish(next_fpu, cpu);
/*
* Switch the PDA and FPU contexts.
@@ -473,6 +478,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
loadsegment(ss, __KERNEL_DS);
}
+ /* Load the Intel cache allocation PQR MSR. */
+ intel_rdt_sched_in();
+
return prev_p;
}
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 79c6311cd912..5b21cb7d84d6 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -64,6 +64,15 @@ void mach_get_cmos_time(struct timespec *now)
unsigned int status, year, mon, day, hour, min, sec, century = 0;
unsigned long flags;
+ /*
+ * If pm_trace abused the RTC as storage, set the timespec to 0,
+ * which tells the caller that this RTC value is unusable.
+ */
+ if (!pm_trace_rtc_valid()) {
+ now->tv_sec = now->tv_nsec = 0;
+ return;
+ }
+
spin_lock_irqsave(&rtc_lock, flags);
/*
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 2bbd27f89802..9820d6d977c6 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -1,7 +1,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
@@ -12,6 +12,7 @@
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
+#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index c00cb64bc0a1..68f8cc222f25 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -261,10 +261,8 @@ static inline void __smp_reschedule_interrupt(void)
__visible void smp_reschedule_interrupt(struct pt_regs *regs)
{
- irq_enter();
ack_APIC_irq();
__smp_reschedule_interrupt();
- irq_exit();
/*
* KVM uses this interrupt to force a cpu out of guest mode
*/
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 42f5eb7b4f6c..2a501abe5000 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -109,6 +109,17 @@ static bool logical_packages_frozen __read_mostly;
/* Maximum number of SMT threads on any online core */
int __max_smt_threads __read_mostly;
+/* Flag to indicate if a complete sched domain rebuild is required */
+bool x86_topology_update;
+
+int arch_update_cpu_topology(void)
+{
+ int retval = x86_topology_update;
+
+ x86_topology_update = false;
+ return retval;
+}
+
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
unsigned long flags;
@@ -471,22 +482,42 @@ static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
+#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
+static inline int x86_sched_itmt_flags(void)
+{
+ return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
+}
+
+#ifdef CONFIG_SCHED_MC
+static int x86_core_flags(void)
+{
+ return cpu_core_flags() | x86_sched_itmt_flags();
+}
+#endif
+#ifdef CONFIG_SCHED_SMT
+static int x86_smt_flags(void)
+{
+ return cpu_smt_flags() | x86_sched_itmt_flags();
+}
+#endif
+#endif
+
static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
- { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+ { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
- { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+ { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
{ NULL, },
};
static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
- { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
+ { cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
- { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
+ { cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
{ NULL, },
@@ -821,14 +852,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
return (send_status | accept_status);
}
-void smp_announce(void)
-{
- int num_nodes = num_online_nodes();
-
- printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
- num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
-}
-
/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
@@ -964,9 +987,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
int cpu0_nmi_registered = 0;
unsigned long timeout;
- idle->thread.sp = (unsigned long) (((struct pt_regs *)
- (THREAD_SIZE + task_stack_page(idle))) - 1);
-
+ idle->thread.sp = (unsigned long)task_pt_regs(idle);
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
initial_stack = idle->thread.sp;
@@ -1111,7 +1132,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
return err;
/* the FPU context is blank, nobody can own it */
- __cpu_disable_lazy_restore(cpu);
+ per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
common_cpu_up(cpu, tidle);
@@ -1331,7 +1352,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
default_setup_apic_routing();
cpu0_logical_apicid = apic_bsp_setup(false);
- pr_info("CPU%d: ", 0);
+ pr_info("CPU0: ");
print_cpu_info(&cpu_data(0));
if (is_uv_system())
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index bd4e3d4d3625..bf0c6d049080 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -853,6 +853,8 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
+ unsigned long cr0;
+
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_MATH_EMULATION
@@ -866,10 +868,20 @@ do_device_not_available(struct pt_regs *regs, long error_code)
return;
}
#endif
- fpu__restore(&current->thread.fpu); /* interrupts still off */
-#ifdef CONFIG_X86_32
- cond_local_irq_enable(regs);
-#endif
+
+ /* This should not happen. */
+ cr0 = read_cr0();
+ if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
+ /* Try to fix it up and carry on. */
+ write_cr0(cr0 & ~X86_CR0_TS);
+ } else {
+ /*
+ * Something terrible happened, and we're better off trying
+ * to kill the task than getting stuck in a never-ending
+ * loop of #NM faults.
+ */
+ die("unexpected #NM exception", regs, error_code);
+ }
}
NOKPROBE_SYMBOL(do_device_not_available);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b3e397a0f29d..0871b8e3d7df 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -703,6 +703,20 @@ unsigned long native_calibrate_tsc(void)
}
}
+ /*
+ * TSC frequency determined by CPUID is a "hardware reported"
+ * frequency and is the most accurate one we have so far. This
+ * is considered a known frequency.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+
+ /*
+ * For Atom SoCs TSC is the only reliable clocksource.
+ * Mark TSC reliable so no watchdog on it.
+ */
+ if (boot_cpu_data.x86_model == INTEL_FAM6_ATOM_GOLDMONT)
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+
return crystal_khz * ebx_numerator / eax_denominator;
}
@@ -1044,18 +1058,20 @@ static void detect_art(void)
if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF)
return;
- cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
- &art_to_tsc_numerator, unused, unused+1);
-
- /* Don't enable ART in a VM, non-stop TSC required */
+ /* Don't enable ART in a VM, non-stop TSC and TSC_ADJUST required */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR) ||
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
- art_to_tsc_denominator < ART_MIN_DENOMINATOR)
+ !boot_cpu_has(X86_FEATURE_TSC_ADJUST))
return;
- if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset))
+ cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator,
+ &art_to_tsc_numerator, unused, unused+1);
+
+ if (art_to_tsc_denominator < ART_MIN_DENOMINATOR)
return;
+ rdmsrl(MSR_IA32_TSC_ADJUST, art_to_tsc_offset);
+
/* Make this sticky over multiple CPU init calls */
setup_force_cpu_cap(X86_FEATURE_ART);
}
@@ -1284,10 +1300,10 @@ static int __init init_tsc_clocksource(void)
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
/*
- * Trust the results of the earlier calibration on systems
- * exporting a reliable TSC.
+ * When TSC frequency is known (retrieved via MSR or CPUID), we skip
+ * the refined calibration and directly register it as a clocksource.
*/
- if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+ if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
clocksource_register_khz(&clocksource_tsc, tsc_khz);
return 0;
}
@@ -1364,6 +1380,8 @@ void __init tsc_init(void)
if (unsynchronized_tsc())
mark_tsc_unstable("TSCs unsynchronized");
+ else
+ tsc_store_and_check_tsc_adjust();
check_system_tsc_reliable();
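
The hunk above tags the CPUID-derived TSC frequency as X86_FEATURE_TSC_KNOWN_FREQ so the refined calibration is skipped. As a rough, hedged illustration of where that number comes from (not part of the patch), the following user-space sketch reads CPUID leaf 0x15 and derives the TSC frequency with the same numerator/denominator ratio native_calibrate_tsc() uses; it assumes a CPU that reports a non-zero crystal frequency in ECX, older parts need a model table instead.

/* Sketch: derive the TSC frequency from CPUID leaf 0x15, mirroring the
 * crystal * numerator / denominator computation above. Assumes the CPU
 * reports a non-zero crystal clock in ECX. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax_denom, ebx_numer, ecx_hz, edx;

	if (!__get_cpuid(0x15, &eax_denom, &ebx_numer, &ecx_hz, &edx))
		return 1;	/* leaf not supported */

	if (!eax_denom || !ebx_numer || !ecx_hz) {
		printf("CPUID 0x15 present but incomplete\n");
		return 1;
	}

	printf("TSC frequency: %llu Hz\n",
	       (unsigned long long)ecx_hz * ebx_numer / eax_denom);
	return 0;
}
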
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 0fe720d64fef..19afdbd7d0a7 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -100,5 +100,24 @@ unsigned long cpu_khz_from_msr(void)
#ifdef CONFIG_X86_LOCAL_APIC
lapic_timer_frequency = (freq * 1000) / HZ;
#endif
+
+ /*
+ * TSC frequency determined by MSR is always considered "known"
+ * because it is reported by HW.
+ * Another fact is that on MSR-capable platforms, PIT/HPET is
+ * generally not available, so calibration won't work at all.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+
+ /*
+ * Unfortunately there is no way for hardware to tell whether the
+ * TSC is reliable. We were told by the silicon design team that the
+ * TSC on Atom SoCs is always "reliable". TSC is also the only
+ * reliable clocksource on these SoCs (HPET is either not present
+ * or not functional) so mark TSC reliable which removes the
+ * requirement for a watchdog clocksource.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+
return res;
}
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 78083bf23ed1..a75f696011d5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -14,18 +14,145 @@
* ( The serial nature of the boot logic and the CPU hotplug lock
* protects against more than 2 CPUs entering this code. )
*/
+#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
+struct tsc_adjust {
+ s64 bootval;
+ s64 adjusted;
+ unsigned long nextcheck;
+ bool warned;
+};
+
+static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+
+void tsc_verify_tsc_adjust(void)
+{
+ struct tsc_adjust *adj = this_cpu_ptr(&tsc_adjust);
+ s64 curval;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ return;
+
+ /* Rate limit the MSR check */
+ if (time_before(jiffies, adj->nextcheck))
+ return;
+
+ adj->nextcheck = jiffies + HZ;
+
+ rdmsrl(MSR_IA32_TSC_ADJUST, curval);
+ if (adj->adjusted == curval)
+ return;
+
+ /* Restore the original value */
+ wrmsrl(MSR_IA32_TSC_ADJUST, adj->adjusted);
+
+ if (!adj->warned) {
+ pr_warn(FW_BUG "TSC ADJUST differs: CPU%u %lld --> %lld. Restoring\n",
+ smp_processor_id(), adj->adjusted, curval);
+ adj->warned = true;
+ }
+}
+
+#ifndef CONFIG_SMP
+bool __init tsc_store_and_check_tsc_adjust(void)
+{
+ struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
+ s64 bootval;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ return false;
+
+ rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+ cur->bootval = bootval;
+ cur->adjusted = bootval;
+ cur->nextcheck = jiffies + HZ;
+ pr_info("TSC ADJUST: Boot CPU0: %lld\n", bootval);
+ return false;
+}
+
+#else /* !CONFIG_SMP */
+
+/*
+ * Store and check the TSC ADJUST MSR if available
+ */
+bool tsc_store_and_check_tsc_adjust(void)
+{
+ struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
+ unsigned int refcpu, cpu = smp_processor_id();
+ struct cpumask *mask;
+ s64 bootval;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ return false;
+
+ rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+ cur->bootval = bootval;
+ cur->nextcheck = jiffies + HZ;
+ cur->warned = false;
+
+ /*
+ * Check whether this CPU is the first in a package to come up. In
+ * this case do not check the boot value against another package
+ * because the package might have been physically hotplugged, where
+ * TSC_ADJUST is expected to be different. When called on the boot
+ * CPU, topology_core_cpumask() might not be available yet.
+ */
+ mask = topology_core_cpumask(cpu);
+ refcpu = mask ? cpumask_any_but(mask, cpu) : nr_cpu_ids;
+
+ if (refcpu >= nr_cpu_ids) {
+ /*
+ * First online CPU in a package stores the boot value in
+ * the adjustment value. This value might change later via
+ * the sync mechanism. If that fails we still can yell
+ * about boot values not being consistent.
+ */
+ cur->adjusted = bootval;
+ pr_info_once("TSC ADJUST: Boot CPU%u: %lld\n", cpu, bootval);
+ return false;
+ }
+
+ ref = per_cpu_ptr(&tsc_adjust, refcpu);
+ /*
+ * Compare the boot value and complain if it differs in the
+ * package.
+ */
+ if (bootval != ref->bootval) {
+ pr_warn("TSC ADJUST differs: Reference CPU%u: %lld CPU%u: %lld\n",
+ refcpu, ref->bootval, cpu, bootval);
+ }
+ /*
+ * The TSC_ADJUST values in a package must be the same. If the boot
+ * value on this newly upcoming CPU differs from the adjustment
+ * value of the already online CPU in this package, set it to that
+ * adjusted value.
+ */
+ if (bootval != ref->adjusted) {
+ pr_warn("TSC ADJUST synchronize: Reference CPU%u: %lld CPU%u: %lld\n",
+ refcpu, ref->adjusted, cpu, bootval);
+ cur->adjusted = ref->adjusted;
+ wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
+ }
+ /*
+ * We have the TSCs forced to be in sync on this package. Skip sync
+ * test:
+ */
+ return true;
+}
+
/*
* Entry/exit counters that make sure that both CPUs
* run the measurement code at once:
*/
static atomic_t start_count;
static atomic_t stop_count;
+static atomic_t skip_test;
+static atomic_t test_runs;
/*
* We use a raw spinlock in this exceptional case, because
@@ -37,15 +164,16 @@ static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static cycles_t last_tsc;
static cycles_t max_warp;
static int nr_warps;
+static int random_warps;
/*
* TSC-warp measurement loop running on both CPUs. This is not called
* if there is no TSC.
*/
-static void check_tsc_warp(unsigned int timeout)
+static cycles_t check_tsc_warp(unsigned int timeout)
{
- cycles_t start, now, prev, end;
- int i;
+ cycles_t start, now, prev, end, cur_max_warp = 0;
+ int i, cur_warps = 0;
start = rdtsc_ordered();
/*
@@ -85,13 +213,22 @@ static void check_tsc_warp(unsigned int timeout)
if (unlikely(prev > now)) {
arch_spin_lock(&sync_lock);
max_warp = max(max_warp, prev - now);
+ cur_max_warp = max_warp;
+ /*
+ * Check whether this bounces back and forth. Only
+ * one CPU should observe time going backwards.
+ */
+ if (cur_warps != nr_warps)
+ random_warps++;
nr_warps++;
+ cur_warps = nr_warps;
arch_spin_unlock(&sync_lock);
}
}
WARN(!(now-start),
"Warning: zero tsc calibration delta: %Ld [max: %Ld]\n",
now-start, end-start);
+ return cur_max_warp;
}
/*
@@ -136,15 +273,26 @@ void check_tsc_sync_source(int cpu)
}
/*
- * Reset it - in case this is a second bootup:
+ * Set the maximum number of test runs to
+ * 1 if the CPU does not provide the TSC_ADJUST MSR
+ * 3 if the MSR is available, so the target can try to adjust
*/
- atomic_set(&stop_count, 0);
-
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ atomic_set(&test_runs, 1);
+ else
+ atomic_set(&test_runs, 3);
+retry:
/*
- * Wait for the target to arrive:
+ * Wait for the target to start or to skip the test:
*/
- while (atomic_read(&start_count) != cpus-1)
+ while (atomic_read(&start_count) != cpus - 1) {
+ if (atomic_read(&skip_test) > 0) {
+ atomic_set(&skip_test, 0);
+ return;
+ }
cpu_relax();
+ }
+
/*
* Trigger the target to continue into the measurement too:
*/
@@ -155,21 +303,35 @@ void check_tsc_sync_source(int cpu)
while (atomic_read(&stop_count) != cpus-1)
cpu_relax();
- if (nr_warps) {
+ /*
+ * If the test was successful set the number of runs to zero and
+ * stop. If not, decrement the number of runs and check if we can
+ * retry. In case of random warps no retry is attempted.
+ */
+ if (!nr_warps) {
+ atomic_set(&test_runs, 0);
+
+ pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
+ smp_processor_id(), cpu);
+
+ } else if (atomic_dec_and_test(&test_runs) || random_warps) {
+ /* Force it to 0 if random warps brought us here */
+ atomic_set(&test_runs, 0);
+
pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
smp_processor_id(), cpu);
pr_warning("Measured %Ld cycles TSC warp between CPUs, "
"turning off TSC clock.\n", max_warp);
+ if (random_warps)
+ pr_warning("TSC warped randomly between CPUs\n");
mark_tsc_unstable("check_tsc_sync_source failed");
- } else {
- pr_debug("TSC synchronization [CPU#%d -> CPU#%d]: passed\n",
- smp_processor_id(), cpu);
}
/*
* Reset it - just in case we boot another CPU later:
*/
atomic_set(&start_count, 0);
+ random_warps = 0;
nr_warps = 0;
max_warp = 0;
last_tsc = 0;
@@ -178,6 +340,12 @@ void check_tsc_sync_source(int cpu)
* Let the target continue with the bootup:
*/
atomic_inc(&stop_count);
+
+ /*
+ * Retry, if there is a chance to do so.
+ */
+ if (atomic_read(&test_runs) > 0)
+ goto retry;
}
/*
@@ -185,6 +353,9 @@ void check_tsc_sync_source(int cpu)
*/
void check_tsc_sync_target(void)
{
+ struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
+ unsigned int cpu = smp_processor_id();
+ cycles_t cur_max_warp, gbl_max_warp;
int cpus = 2;
/* Also aborts if there is no TSC. */
@@ -192,6 +363,16 @@ void check_tsc_sync_target(void)
return;
/*
+ * Store, verify and sanitize the TSC adjust register. If
+ * successful skip the test.
+ */
+ if (tsc_store_and_check_tsc_adjust()) {
+ atomic_inc(&skip_test);
+ return;
+ }
+
+retry:
+ /*
* Register this CPU's participation and wait for the
* source CPU to start the measurement:
*/
@@ -199,7 +380,12 @@ void check_tsc_sync_target(void)
while (atomic_read(&start_count) != cpus)
cpu_relax();
- check_tsc_warp(loop_timeout(smp_processor_id()));
+ cur_max_warp = check_tsc_warp(loop_timeout(cpu));
+
+ /*
+ * Store the maximum observed warp value for a potential retry:
+ */
+ gbl_max_warp = max_warp;
/*
* Ok, we are done:
@@ -211,4 +397,47 @@ void check_tsc_sync_target(void)
*/
while (atomic_read(&stop_count) != cpus)
cpu_relax();
+
+ /*
+ * Reset it for the next sync test:
+ */
+ atomic_set(&stop_count, 0);
+
+ /*
+ * Check the number of remaining test runs. If not zero, the test
+ * failed and a retry with adjusted TSC is possible. If zero, the
+ * test either succeeded or failed terminally.
+ */
+ if (!atomic_read(&test_runs))
+ return;
+
+ /*
+ * If the warp value of this CPU is 0, then the other CPU
+ * observed time going backwards so this TSC was ahead and
+ * needs to move backwards.
+ */
+ if (!cur_max_warp)
+ cur_max_warp = -gbl_max_warp;
+
+ /*
+ * Add the result to the previous adjustment value.
+ *
+ * The adjustment value is slightly off by the overhead of the
+ * sync mechanism (observed values are ~200 TSC cycles), but this
+ * really depends on CPU, node distance and frequency. So
+ * compensating for this is hard to get right. Experiments show
+ * that the warp is no longer detectable when the observed warp
+ * value is used. In the worst case the adjustment needs to go
+ * through a 3rd run for fine tuning.
+ */
+ cur->adjusted += cur_max_warp;
+
+ pr_warn("TSC ADJUST compensate: CPU%u observed %lld warp. Adjust: %lld\n",
+ cpu, cur_max_warp, cur->adjusted);
+
+ wrmsrl(MSR_IA32_TSC_ADJUST, cur->adjusted);
+ goto retry;
+
}
+
+#endif /* CONFIG_SMP */
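
The new tsc_sync.c code stores, compares and, if necessary, rewrites MSR_IA32_TSC_ADJUST (0x3b) for every CPU. As a hedged aside for checking whether firmware left the adjust values inconsistent across a package, the same MSR can be read from user space through the msr driver; the sketch below is illustrative only and assumes the msr module is loaded and the process runs as root.

/* Sketch: dump MSR_IA32_TSC_ADJUST (0x3b) for one CPU via /dev/cpu/N/msr.
 * Assumes the msr driver is loaded and root privileges. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MSR_IA32_TSC_ADJUST 0x3b

int main(int argc, char **argv)
{
	char path[64];
	uint64_t val;
	int cpu = argc > 1 ? atoi(argv[1]) : 0;
	int fd;

	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror(path);
		return 1;
	}
	/* The msr driver uses the file offset as the MSR index. */
	if (pread(fd, &val, sizeof(val), MSR_IA32_TSC_ADJUST) != sizeof(val)) {
		perror("pread");
		return 1;
	}
	printf("CPU%d TSC_ADJUST = %lld\n", cpu, (long long)val);
	close(fd);
	return 0;
}
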
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index a2456d4d286a..ea7b7f9a3b9e 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -14,13 +14,55 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
if (unwind_done(state))
return 0;
+ if (state->regs && user_mode(state->regs))
+ return 0;
+
addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
addr_p);
- return __kernel_text_address(addr) ? addr : 0;
+ if (!__kernel_text_address(addr)) {
+ printk_deferred_once(KERN_WARNING
+ "WARNING: unrecognized kernel stack return address %p at %p in %s:%d\n",
+ (void *)addr, addr_p, state->task->comm,
+ state->task->pid);
+ return 0;
+ }
+
+ return addr;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
+static size_t regs_size(struct pt_regs *regs)
+{
+ /* x86_32 regs from kernel mode are two words shorter: */
+ if (IS_ENABLED(CONFIG_X86_32) && !user_mode(regs))
+ return sizeof(*regs) - 2*sizeof(long);
+
+ return sizeof(*regs);
+}
+
+static bool is_last_task_frame(struct unwind_state *state)
+{
+ unsigned long bp = (unsigned long)state->bp;
+ unsigned long regs = (unsigned long)task_pt_regs(state->task);
+
+ return bp == regs - FRAME_HEADER_SIZE;
+}
+
+/*
+ * This determines if the frame pointer actually contains an encoded pointer to
+ * pt_regs on the stack. See ENCODE_FRAME_POINTER.
+ */
+static struct pt_regs *decode_frame_pointer(unsigned long *bp)
+{
+ unsigned long regs = (unsigned long)bp;
+
+ if (!(regs & 0x1))
+ return NULL;
+
+ return (struct pt_regs *)(regs & ~0x1);
+}
+
static bool update_stack_state(struct unwind_state *state, void *addr,
size_t len)
{
@@ -43,26 +85,117 @@ static bool update_stack_state(struct unwind_state *state, void *addr,
bool unwind_next_frame(struct unwind_state *state)
{
- unsigned long *next_bp;
+ struct pt_regs *regs;
+ unsigned long *next_bp, *next_frame;
+ size_t next_len;
+ enum stack_type prev_type = state->stack_info.type;
if (unwind_done(state))
return false;
- next_bp = (unsigned long *)*state->bp;
+ /* have we reached the end? */
+ if (state->regs && user_mode(state->regs))
+ goto the_end;
+
+ if (is_last_task_frame(state)) {
+ regs = task_pt_regs(state->task);
+
+ /*
+ * kthreads (other than the boot CPU's idle thread) have some
+ * partial regs at the end of their stack which were placed
+ * there by copy_thread_tls(). But the regs don't have any
+ * useful information, so we can skip them.
+ *
+ * This user_mode() check is slightly broader than a PF_KTHREAD
+ * check because it also catches the awkward situation where a
+ * newly forked kthread transitions into a user task by calling
+ * do_execve(), which eventually clears PF_KTHREAD.
+ */
+ if (!user_mode(regs))
+ goto the_end;
+
+ /*
+ * We're almost at the end, but not quite: there's still the
+ * syscall regs frame. Entry code doesn't encode the regs
+ * pointer for syscalls, so we have to set it manually.
+ */
+ state->regs = regs;
+ state->bp = NULL;
+ return true;
+ }
+
+ /* get the next frame pointer */
+ if (state->regs)
+ next_bp = (unsigned long *)state->regs->bp;
+ else
+ next_bp = (unsigned long *)*state->bp;
+
+ /* is the next frame pointer an encoded pointer to pt_regs? */
+ regs = decode_frame_pointer(next_bp);
+ if (regs) {
+ next_frame = (unsigned long *)regs;
+ next_len = sizeof(*regs);
+ } else {
+ next_frame = next_bp;
+ next_len = FRAME_HEADER_SIZE;
+ }
/* make sure the next frame's data is accessible */
- if (!update_stack_state(state, next_bp, FRAME_HEADER_SIZE))
- return false;
+ if (!update_stack_state(state, next_frame, next_len)) {
+ /*
+ * Don't warn on bad regs->bp. An interrupt in entry code
+ * might cause a false positive warning.
+ */
+ if (state->regs)
+ goto the_end;
+
+ goto bad_address;
+ }
+
+ /* Make sure it only unwinds up and doesn't overlap the last frame: */
+ if (state->stack_info.type == prev_type) {
+ if (state->regs && (void *)next_frame < (void *)state->regs + regs_size(state->regs))
+ goto bad_address;
+
+ if (state->bp && (void *)next_frame < (void *)state->bp + FRAME_HEADER_SIZE)
+ goto bad_address;
+ }
/* move to the next frame */
- state->bp = next_bp;
+ if (regs) {
+ state->regs = regs;
+ state->bp = NULL;
+ } else {
+ state->bp = next_bp;
+ state->regs = NULL;
+ }
+
return true;
+
+bad_address:
+ if (state->regs) {
+ printk_deferred_once(KERN_WARNING
+ "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
+ state->regs, state->task->comm,
+ state->task->pid, next_frame);
+ } else {
+ printk_deferred_once(KERN_WARNING
+ "WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
+ state->bp, state->task->comm,
+ state->task->pid, next_frame);
+ }
+the_end:
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);
void __unwind_start(struct unwind_state *state, struct task_struct *task,
struct pt_regs *regs, unsigned long *first_frame)
{
+ unsigned long *bp, *frame;
+ size_t len;
+
memset(state, 0, sizeof(*state));
state->task = task;
@@ -73,12 +206,22 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
}
/* set up the starting stack frame */
- state->bp = get_frame_pointer(task, regs);
+ bp = get_frame_pointer(task, regs);
+ regs = decode_frame_pointer(bp);
+ if (regs) {
+ state->regs = regs;
+ frame = (unsigned long *)regs;
+ len = sizeof(*regs);
+ } else {
+ state->bp = bp;
+ frame = bp;
+ len = FRAME_HEADER_SIZE;
+ }
/* initialize stack info and make sure the frame data is accessible */
- get_stack_info(state->bp, state->task, &state->stack_info,
+ get_stack_info(frame, state->task, &state->stack_info,
&state->stack_mask);
- update_stack_state(state, state->bp, FRAME_HEADER_SIZE);
+ update_stack_state(state, frame, len);
/*
* The caller can provide the address of the first frame directly
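
decode_frame_pointer() above relies on the entry code's ENCODE_FRAME_POINTER setting bit 0 of the saved frame pointer whenever it really points at a struct pt_regs; since stack addresses are word aligned, that bit is otherwise always clear. The following stand-alone sketch shows the same low-bit tagging trick with illustrative names, it is not the kernel's own code.

/* Sketch: tag a pointer's low bit to mark "this points at pt_regs",
 * mirroring ENCODE_FRAME_POINTER / decode_frame_pointer() above.
 * Works because stack frames are word aligned, so bit 0 is free. */
#include <stdint.h>
#include <stddef.h>

struct fake_regs { unsigned long ip, sp, bp; };	/* stand-in for pt_regs */

static void *encode_regs_ptr(struct fake_regs *regs)
{
	return (void *)((uintptr_t)regs | 0x1);
}

static struct fake_regs *decode_regs_ptr(void *bp)
{
	uintptr_t v = (uintptr_t)bp;

	if (!(v & 0x1))
		return NULL;		/* an ordinary frame pointer */
	return (struct fake_regs *)(v & ~(uintptr_t)0x1);
}
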
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index dbf67f64d5ec..e79f15f108a8 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -91,10 +91,10 @@ SECTIONS
/* Text and read-only data */
.text : AT(ADDR(.text) - LOAD_OFFSET) {
_text = .;
+ _stext = .;
/* bootstrapping code */
HEAD_TEXT
. = ALIGN(8);
- _stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index afa7bbb596cd..0aefb626fa8f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,7 +16,6 @@
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
@@ -114,8 +113,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- if (use_eager_fpu())
- kvm_x86_ops->fpu_activate(vcpu);
+ kvm_x86_ops->fpu_activate(vcpu);
/*
* The existing code assumes virtual address is 48-bit in the canonical
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5382b82462fc..3980da515fd0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2145,12 +2145,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
#endif
if (vmx->host_state.msr_host_bndcfgs)
wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
- /*
- * If the FPU is not active (through the host task or
- * the guest vcpu), then restore the cr0.TS bit.
- */
- if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded)
- stts();
load_gdt(this_cpu_ptr(&host_gdt));
}
@@ -4845,9 +4839,11 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
u32 low32, high32;
unsigned long tmpl;
struct desc_ptr dt;
- unsigned long cr4;
+ unsigned long cr0, cr4;
- vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
+ cr0 = read_cr0();
+ WARN_ON(cr0 & X86_CR0_TS);
+ vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
/* Save the most likely value for this task's CR4 in the VMCS. */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 073eaeabc2a7..2912684ff902 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2071,6 +2071,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
return;
+ vcpu->arch.st.steal.preempted = 0;
+
if (vcpu->arch.st.steal.version & 1)
vcpu->arch.st.steal.version += 1; /* first time write, random junk */
@@ -2826,8 +2828,22 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
+static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+ vcpu->arch.st.steal.preempted = 1;
+
+ kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+ &vcpu->arch.st.steal.preempted,
+ offsetof(struct kvm_steal_time, preempted),
+ sizeof(vcpu->arch.st.steal.preempted));
+}
+
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ kvm_steal_time_set_preempted(vcpu);
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
vcpu->arch.last_host_tsc = rdtsc();
@@ -5081,11 +5097,6 @@ static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
{
preempt_disable();
kvm_load_guest_fpu(emul_to_vcpu(ctxt));
- /*
- * CR0.TS may reference the host fpu state, not the guest fpu state,
- * so it may be clear at this point.
- */
- clts();
}
static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
@@ -7407,25 +7418,13 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
- if (!vcpu->guest_fpu_loaded) {
- vcpu->fpu_counter = 0;
+ if (!vcpu->guest_fpu_loaded)
return;
- }
vcpu->guest_fpu_loaded = 0;
copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
__kernel_fpu_end();
++vcpu->stat.fpu_reload;
- /*
- * If using eager FPU mode, or if the guest is a frequent user
- * of the FPU, just leave the FPU active for next time.
- * Every 255 times fpu_counter rolls over to 0; a guest that uses
- * the FPU in bursts will revert to loading it on demand.
- */
- if (!use_eager_fpu()) {
- if (++vcpu->fpu_counter < 5)
- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
- }
trace_kvm_fpu(0);
}
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 25da5bc8d83d..4ca0d78adcf0 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -497,38 +497,24 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
* a whole series of functions like read_cr0() and write_cr0().
*
* We start with cr0. cr0 allows you to turn on and off all kinds of basic
- * features, but Linux only really cares about one: the horrifically-named Task
- * Switched (TS) bit at bit 3 (ie. 8)
+ * features, but the only cr0 bit that Linux ever used at runtime was the
+ * horrifically-named Task Switched (TS) bit at bit 3 (ie. 8)
*
* What does the TS bit do? Well, it causes the CPU to trap (interrupt 7) if
* the floating point unit is used. Which allows us to restore FPU state
- * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
- * name like "FPUTRAP bit" be a little less cryptic?
+ * lazily after a task switch if we wanted to, but wouldn't a name like
+ * "FPUTRAP bit" be a little less cryptic?
*
- * We store cr0 locally because the Host never changes it. The Guest sometimes
- * wants to read it and we'd prefer not to bother the Host unnecessarily.
+ * Fortunately, Linux keeps it simple and doesn't use TS, so we can ignore
+ * cr0.
*/
-static unsigned long current_cr0;
static void lguest_write_cr0(unsigned long val)
{
- lazy_hcall1(LHCALL_TS, val & X86_CR0_TS);
- current_cr0 = val;
}
static unsigned long lguest_read_cr0(void)
{
- return current_cr0;
-}
-
-/*
- * Intel provided a special instruction to clear the TS bit for people too cool
- * to use write_cr0() to do it. This "clts" instruction is faster, because all
- * the vowels have been optimized out.
- */
-static void lguest_clts(void)
-{
- lazy_hcall1(LHCALL_TS, 0);
- current_cr0 &= ~X86_CR0_TS;
+ return 0;
}
/*
@@ -1432,7 +1418,6 @@ __init void lguest_init(void)
pv_cpu_ops.load_tls = lguest_load_tls;
pv_cpu_ops.get_debugreg = lguest_get_debugreg;
pv_cpu_ops.set_debugreg = lguest_set_debugreg;
- pv_cpu_ops.clts = lguest_clts;
pv_cpu_ops.read_cr0 = lguest_read_cr0;
pv_cpu_ops.write_cr0 = lguest_write_cr0;
pv_cpu_ops.read_cr4 = lguest_read_cr4;
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index d376e4b48f88..c5959576c315 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -16,53 +16,6 @@
#include <asm/smap.h>
#include <asm/export.h>
-/* Standard copy_to_user with segment limit checking */
-ENTRY(_copy_to_user)
- mov PER_CPU_VAR(current_task), %rax
- movq %rdi,%rcx
- addq %rdx,%rcx
- jc bad_to_user
- cmpq TASK_addr_limit(%rax),%rcx
- ja bad_to_user
- ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
- "jmp copy_user_generic_string", \
- X86_FEATURE_REP_GOOD, \
- "jmp copy_user_enhanced_fast_string", \
- X86_FEATURE_ERMS
-ENDPROC(_copy_to_user)
-EXPORT_SYMBOL(_copy_to_user)
-
-/* Standard copy_from_user with segment limit checking */
-ENTRY(_copy_from_user)
- mov PER_CPU_VAR(current_task), %rax
- movq %rsi,%rcx
- addq %rdx,%rcx
- jc bad_from_user
- cmpq TASK_addr_limit(%rax),%rcx
- ja bad_from_user
- ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
- "jmp copy_user_generic_string", \
- X86_FEATURE_REP_GOOD, \
- "jmp copy_user_enhanced_fast_string", \
- X86_FEATURE_ERMS
-ENDPROC(_copy_from_user)
-EXPORT_SYMBOL(_copy_from_user)
-
-
- .section .fixup,"ax"
- /* must zero dest */
-ENTRY(bad_from_user)
-bad_from_user:
- movl %edx,%ecx
- xorl %eax,%eax
- rep
- stosb
-bad_to_user:
- movl %edx,%eax
- ret
-ENDPROC(bad_from_user)
- .previous
-
/*
* copy_user_generic_unrolled - memory copy with exception handling.
* This version is for CPUs like P4 that don't have efficient micro
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index d1dee753b949..07764255b611 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -113,14 +113,14 @@ int msr_clear_bit(u32 msr, u8 bit)
}
#ifdef CONFIG_TRACEPOINTS
-void do_trace_write_msr(unsigned msr, u64 val, int failed)
+void do_trace_write_msr(unsigned int msr, u64 val, int failed)
{
trace_write_msr(msr, val, failed);
}
EXPORT_SYMBOL(do_trace_write_msr);
EXPORT_TRACEPOINT_SYMBOL(write_msr);
-void do_trace_read_msr(unsigned msr, u64 val, int failed)
+void do_trace_read_msr(unsigned int msr, u64 val, int failed)
{
trace_read_msr(msr, val, failed);
}
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index b4908789484e..c074799bddae 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -34,3 +34,52 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
+
+/**
+ * copy_to_user: - Copy a block of data into user space.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from kernel space to user space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = __copy_to_user(to, from, n);
+ return n;
+}
+EXPORT_SYMBOL(_copy_to_user);
+
+/**
+ * copy_from_user: - Copy a block of data from user space.
+ * @to: Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
+ *
+ * Copy data from user space to kernel space.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
+{
+ if (access_ok(VERIFY_READ, from, n))
+ n = __copy_from_user(to, from, n);
+ else
+ memset(to, 0, n);
+ return n;
+}
+EXPORT_SYMBOL(_copy_from_user);
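
With the hunk above, _copy_to_user()/_copy_from_user() live in C and are shared between 32-bit and 64-bit builds; callers keep the usual contract that the return value is the number of bytes that could not be copied, with the destination zero padded on a faulting read. A typical caller looks like the following hedged, illustrative write() handler (names are placeholders, not part of the patch).

/* Sketch of a typical caller: copy a small, bounded buffer from user
 * space and fail with -EFAULT on a short copy. Illustration only. */
#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	char kbuf[64];

	if (count >= sizeof(kbuf))
		return -EINVAL;

	/* copy_from_user() returns the number of bytes NOT copied */
	if (copy_from_user(kbuf, ubuf, count))
		return -EFAULT;

	kbuf[count] = '\0';
	/* ... parse kbuf ... */
	return count;
}
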
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 3bc7baf2a711..0b281217c890 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -640,52 +640,3 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
-
-/**
- * copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from kernel space to user space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-unsigned long _copy_to_user(void __user *to, const void *from, unsigned n)
-{
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
- return n;
-}
-EXPORT_SYMBOL(_copy_to_user);
-
-/**
- * copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
- * Context: User context only. This function may sleep if pagefaults are
- * enabled.
- *
- * Copy data from user space to kernel space.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-unsigned long _copy_from_user(void *to, const void __user *from, unsigned n)
-{
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-EXPORT_SYMBOL(_copy_from_user);
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9f72ca3b2669..17c55a536fdd 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -679,8 +679,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
printk(KERN_CONT "paging request");
printk(KERN_CONT " at %p\n", (void *) address);
- printk(KERN_ALERT "IP:");
- printk_address(regs->ip);
+ printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip);
dump_pagetable(address);
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 83e701f160a9..efc32bc6862b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -986,20 +986,17 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
return 0;
}
-int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn)
+void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, pfn_t pfn)
{
enum page_cache_mode pcm;
if (!pat_enabled())
- return 0;
+ return;
/* Set prot based on lookup */
pcm = lookup_memtype(pfn_t_to_phys(pfn));
*prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
cachemode2protval(pcm));
-
- return 0;
}
/*
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index f88ce0e5efd9..2dab69a706ec 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -141,8 +141,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
* Called from the FPU code when creating a fresh set of FPU
* registers. This is called from a very specific context where
 * we know the FPU registers are safe for use and we can use PKRU
- * directly. The fact that PKRU is only available when we are
- * using eagerfpu mode makes this possible.
+ * directly.
*/
void copy_init_pkru_to_fpregs(void)
{
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 28c04123b6dd..ffdbc4836b4f 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -339,10 +339,11 @@ fail:
return 0;
}
-static void nmi_cpu_setup(void *dummy)
+static void nmi_cpu_setup(void)
{
int cpu = smp_processor_id();
struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu);
+
nmi_cpu_save_registers(msrs);
raw_spin_lock(&oprofilefs_lock);
model->setup_ctrs(model, msrs);
@@ -369,7 +370,7 @@ static void nmi_cpu_restore_registers(struct op_msrs *msrs)
}
}
-static void nmi_cpu_shutdown(void *dummy)
+static void nmi_cpu_shutdown(void)
{
unsigned int v;
int cpu = smp_processor_id();
@@ -387,20 +388,26 @@ static void nmi_cpu_shutdown(void *dummy)
nmi_cpu_restore_registers(msrs);
}
-static void nmi_cpu_up(void *dummy)
+static int nmi_cpu_online(unsigned int cpu)
{
+ local_irq_disable();
if (nmi_enabled)
- nmi_cpu_setup(dummy);
+ nmi_cpu_setup();
if (ctr_running)
- nmi_cpu_start(dummy);
+ nmi_cpu_start(NULL);
+ local_irq_enable();
+ return 0;
}
-static void nmi_cpu_down(void *dummy)
+static int nmi_cpu_down_prep(unsigned int cpu)
{
+ local_irq_disable();
if (ctr_running)
- nmi_cpu_stop(dummy);
+ nmi_cpu_stop(NULL);
if (nmi_enabled)
- nmi_cpu_shutdown(dummy);
+ nmi_cpu_shutdown();
+ local_irq_enable();
+ return 0;
}
static int nmi_create_files(struct dentry *root)
@@ -433,26 +440,7 @@ static int nmi_create_files(struct dentry *root)
return 0;
}
-static int oprofile_cpu_notifier(struct notifier_block *b, unsigned long action,
- void *data)
-{
- int cpu = (unsigned long)data;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- smp_call_function_single(cpu, nmi_cpu_up, NULL, 0);
- break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, nmi_cpu_down, NULL, 1);
- break;
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block oprofile_cpu_nb = {
- .notifier_call = oprofile_cpu_notifier
-};
+static enum cpuhp_state cpuhp_nmi_online;
static int nmi_setup(void)
{
@@ -495,20 +483,17 @@ static int nmi_setup(void)
if (err)
goto fail;
- cpu_notifier_register_begin();
-
- /* Use get/put_online_cpus() to protect 'nmi_enabled' */
- get_online_cpus();
nmi_enabled = 1;
/* make nmi_enabled visible to the nmi handler: */
smp_mb();
- on_each_cpu(nmi_cpu_setup, NULL, 1);
- __register_cpu_notifier(&oprofile_cpu_nb);
- put_online_cpus();
-
- cpu_notifier_register_done();
-
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
+ nmi_cpu_online, nmi_cpu_down_prep);
+ if (err < 0)
+ goto fail_nmi;
+ cpuhp_nmi_online = err;
return 0;
+fail_nmi:
+ unregister_nmi_handler(NMI_LOCAL, "oprofile");
fail:
free_msrs();
return err;
@@ -518,17 +503,9 @@ static void nmi_shutdown(void)
{
struct op_msrs *msrs;
- cpu_notifier_register_begin();
-
- /* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
- get_online_cpus();
- on_each_cpu(nmi_cpu_shutdown, NULL, 1);
+ cpuhp_remove_state(cpuhp_nmi_online);
nmi_enabled = 0;
ctr_running = 0;
- __unregister_cpu_notifier(&oprofile_cpu_nb);
- put_online_cpus();
-
- cpu_notifier_register_done();
/* make variables visible to the nmi handler: */
smp_mb();
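
The oprofile conversion above follows the common pattern for retiring a CPU notifier: provide an online and an offline callback, let cpuhp_setup_state() invoke the online callback on all currently online CPUs and hand back a dynamically allocated state, and pass that state to cpuhp_remove_state() on teardown. A stripped-down sketch of the same pattern in a module follows; the names are placeholders, not the driver's.

/* Sketch of the cpuhp_setup_state() pattern used above (placeholder names). */
#include <linux/module.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state example_hp_state;

static int example_cpu_online(unsigned int cpu)
{
	pr_info("example: CPU%u online\n", cpu);
	return 0;		/* non-zero would roll the transition back */
}

static int example_cpu_prep_down(unsigned int cpu)
{
	pr_info("example: CPU%u going down\n", cpu);
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* CPUHP_AP_ONLINE_DYN: the core picks a free state and returns it. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
				example_cpu_online, example_cpu_prep_down);
	if (ret < 0)
		return ret;
	example_hp_state = ret;
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state(example_hp_state);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
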
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index c20d2cc7ef64..ae387e5ee6f7 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -327,35 +327,18 @@ static int __init early_root_info_init(void)
#define ENABLE_CF8_EXT_CFG (1ULL << 46)
-static void enable_pci_io_ecs(void *unused)
+static int amd_bus_cpu_online(unsigned int cpu)
{
u64 reg;
+
rdmsrl(MSR_AMD64_NB_CFG, reg);
if (!(reg & ENABLE_CF8_EXT_CFG)) {
reg |= ENABLE_CF8_EXT_CFG;
wrmsrl(MSR_AMD64_NB_CFG, reg);
}
+ return 0;
}
-static int amd_cpu_notify(struct notifier_block *self, unsigned long action,
- void *hcpu)
-{
- int cpu = (long)hcpu;
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- smp_call_function_single(cpu, enable_pci_io_ecs, NULL, 0);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block amd_cpu_notifier = {
- .notifier_call = amd_cpu_notify,
-};
-
static void __init pci_enable_pci_io_ecs(void)
{
#ifdef CONFIG_AMD_NB
@@ -385,7 +368,7 @@ static void __init pci_enable_pci_io_ecs(void)
static int __init pci_io_ecs_init(void)
{
- int cpu;
+ int ret;
/* assume all cpus from fam10h have IO ECS */
if (boot_cpu_data.x86 < 0x10)
@@ -395,12 +378,9 @@ static int __init pci_io_ecs_init(void)
if (early_pci_allowed())
pci_enable_pci_io_ecs();
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
- (void *)(long)cpu);
- __register_cpu_notifier(&amd_cpu_notifier);
- cpu_notifier_register_done();
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/amd_bus:online",
+ amd_bus_cpu_online, NULL);
+ WARN_ON(ret < 0);
pci_probe |= PCI_HAS_IO_ECS;
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
index 1eb47b6298c2..e793fe509971 100644
--- a/arch/x86/platform/intel-mid/mfld.c
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -49,8 +49,13 @@ static unsigned long __init mfld_calibrate_tsc(void)
fast_calibrate = ratio * fsb;
pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
lapic_timer_frequency = fsb * 1000 / HZ;
- /* mark tsc clocksource as reliable */
- set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+ /*
+ * TSC on Intel Atom SoCs is reliable and of known frequency.
+ * See tsc_msr.c for details.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
return fast_calibrate;
}
diff --git a/arch/x86/platform/intel-mid/mrfld.c b/arch/x86/platform/intel-mid/mrfld.c
index 59253db41bbc..e0607c77a1bd 100644
--- a/arch/x86/platform/intel-mid/mrfld.c
+++ b/arch/x86/platform/intel-mid/mrfld.c
@@ -78,8 +78,12 @@ static unsigned long __init tangier_calibrate_tsc(void)
pr_debug("Setting lapic_timer_frequency = %d\n",
lapic_timer_frequency);
- /* mark tsc clocksource as reliable */
- set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+ /*
+ * TSC on Intel Atom SoCs is reliable and of known frequency.
+ * See tsc_msr.c for details.
+ */
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+ setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
return fast_calibrate;
}
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
index 67375dda451c..ef03852ea6e8 100644
--- a/arch/x86/platform/intel-mid/pwr.c
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -270,7 +270,6 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
return 0;
}
-EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
{
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index cd5173a2733f..8410e7d0a5b5 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -387,8 +387,8 @@ static void uv_nmi_dump_cpu_ip_hdr(void)
/* Dump Instruction Pointer info */
static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
{
- pr_info("UV: %4d %6d %-32.32s ", cpu, current->pid, current->comm);
- printk_address(regs->ip);
+ pr_info("UV: %4d %6d %-32.32s %pS",
+ cpu, current->pid, current->comm, (void *)regs->ip);
}
/*
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
index 1ac76479c266..8730c2882fff 100644
--- a/arch/x86/ras/mce_amd_inj.c
+++ b/arch/x86/ras/mce_amd_inj.c
@@ -275,6 +275,8 @@ static void do_inject(void)
unsigned int cpu = i_mce.extcpu;
u8 b = i_mce.bank;
+ rdtscll(i_mce.tsc);
+
if (i_mce.misc)
i_mce.status |= MCI_STATUS_MISCV;
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 25012abc3409..4463fa72db94 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -69,7 +69,7 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
# ---------------------------------------------------------------------------
-KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
+KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
-I$(srctree)/arch/x86/boot
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index ba70ff232917..1972565ab106 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -269,7 +269,8 @@ int main(int argc, char **argv)
insns++;
}
- fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+ fprintf((errors) ? stderr : stdout,
+ "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
prog,
(errors) ? "Failure" : "Success",
insns,
diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c
index 56f04db0c9c0..ecf31e0358c8 100644
--- a/arch/x86/tools/test_get_len.c
+++ b/arch/x86/tools/test_get_len.c
@@ -167,7 +167,7 @@ int main(int argc, char **argv)
fprintf(stderr, "Warning: decoded and checked %d"
" instructions with %d warnings\n", insns, warnings);
else
- fprintf(stderr, "Succeed: decoded and checked %d"
+ fprintf(stdout, "Success: decoded and checked %d"
" instructions\n", insns);
return 0;
}
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h
index 233ee09c1ce8..c77db2288982 100644
--- a/arch/x86/um/asm/processor.h
+++ b/arch/x86/um/asm/processor.h
@@ -26,7 +26,6 @@ static inline void rep_nop(void)
}
#define cpu_relax() rep_nop()
-#define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(t) (&(t)->thread.regs)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bdd855685403..ced7027b3fbc 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -980,17 +980,6 @@ static void xen_io_delay(void)
{
}
-static void xen_clts(void)
-{
- struct multicall_space mcs;
-
- mcs = xen_mc_entry(0);
-
- MULTI_fpu_taskswitch(mcs.mc, 0);
-
- xen_mc_issue(PARAVIRT_LAZY_CPU);
-}
-
static DEFINE_PER_CPU(unsigned long, xen_cr0_value);
static unsigned long xen_read_cr0(void)
@@ -1233,8 +1222,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.set_debugreg = xen_set_debugreg,
.get_debugreg = xen_get_debugreg,
- .clts = xen_clts,
-
.read_cr0 = xen_read_cr0,
.write_cr0 = xen_write_cr0,
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 3d6e0064cbfc..e8a9ea7d7a21 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -114,6 +114,7 @@ void xen_uninit_lock_cpu(int cpu)
per_cpu(irq_name, cpu) = NULL;
}
+PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
/*
* Our init of PV spinlocks is split in two init functions due to us
@@ -137,6 +138,7 @@ void __init xen_init_spinlocks(void)
pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
pv_lock_ops.wait = xen_qlock_wait;
pv_lock_ops.kick = xen_qlock_kick;
+ pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
}
/*
diff --git a/arch/xtensa/include/asm/mutex.h b/arch/xtensa/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/xtensa/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index b42d68bfe3cf..86ffcd68e496 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -206,7 +206,6 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() cpu_relax()
/* Special register access. */
diff --git a/drivers/Makefile b/drivers/Makefile
index 194d20bee7dc..060026a02f59 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -107,7 +107,7 @@ obj-$(CONFIG_INPUT) += input/
obj-$(CONFIG_RTC_LIB) += rtc/
obj-y += i2c/ media/
obj-$(CONFIG_PPS) += pps/
-obj-$(CONFIG_PTP_1588_CLOCK) += ptp/
+obj-y += ptp/
obj-$(CONFIG_W1) += w1/
obj-y += power/
obj-$(CONFIG_HWMON) += hwmon/
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 56190d00fd87..5cbefd7621f0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -331,6 +331,16 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
+#ifdef CONFIG_X86
+ if (boot_cpu_has(X86_FEATURE_HWP)) {
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT;
+ }
+#endif
+
+ if (IS_ENABLED(CONFIG_SCHED_MC_PRIO))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT;
+
if (!ghes_disable)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 2376628c599c..eb3af2739537 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -363,6 +363,7 @@ static ssize_t file_name##_show(struct device *dev, \
return sprintf(buf, "%u\n", this_leaf->object); \
}
+show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
@@ -444,6 +445,7 @@ static ssize_t write_policy_show(struct device *dev,
return n;
}
+static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
@@ -457,6 +459,7 @@ static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
static struct attribute *cache_default_attrs[] = {
+ &dev_attr_id.attr,
&dev_attr_type.attr,
&dev_attr_level.attr,
&dev_attr_shared_cpu_map.attr,
@@ -480,6 +483,8 @@ cache_default_attrs_is_visible(struct kobject *kobj,
const struct cpumask *mask = &this_leaf->shared_cpu_map;
umode_t mode = attr->mode;
+ if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
+ return mode;
if ((attr == &dev_attr_type.attr) && this_leaf->type)
return mode;
if ((attr == &dev_attr_level.attr) && this_leaf->level)
@@ -628,57 +633,30 @@ err:
return rc;
}
-static void cache_remove_dev(unsigned int cpu)
+static int cacheinfo_cpu_online(unsigned int cpu)
{
- if (!cpumask_test_cpu(cpu, &cache_dev_map))
- return;
- cpumask_clear_cpu(cpu, &cache_dev_map);
+ int rc = detect_cache_attributes(cpu);
- cpu_cache_sysfs_exit(cpu);
+ if (rc)
+ return rc;
+ rc = cache_add_dev(cpu);
+ if (rc)
+ free_cache_attributes(cpu);
+ return rc;
}
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
+ if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
+ cpu_cache_sysfs_exit(cpu);
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- rc = detect_cache_attributes(cpu);
- if (!rc)
- rc = cache_add_dev(cpu);
- break;
- case CPU_DEAD:
- cache_remove_dev(cpu);
- free_cache_attributes(cpu);
- break;
- }
- return notifier_from_errno(rc);
+ free_cache_attributes(cpu);
+ return 0;
}
static int __init cacheinfo_sysfs_init(void)
{
- int cpu, rc = 0;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu) {
- rc = detect_cache_attributes(cpu);
- if (rc)
- goto out;
- rc = cache_add_dev(cpu);
- if (rc) {
- free_cache_attributes(cpu);
- pr_err("error populating cacheinfo..cpu%d\n", cpu);
- goto out;
- }
- }
- __hotcpu_notifier(cacheinfo_cpu_callback, 0);
-
-out:
- cpu_notifier_register_done();
- return rc;
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
+ cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
-
device_initcall(cacheinfo_sysfs_init);
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index efec10b49d59..1cda505d6a85 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -10,6 +10,7 @@
#include <linux/pm-trace.h>
#include <linux/export.h>
#include <linux/rtc.h>
+#include <linux/suspend.h>
#include <linux/mc146818rtc.h>
@@ -74,6 +75,9 @@
#define DEVSEED (7919)
+bool pm_trace_rtc_abused __read_mostly;
+EXPORT_SYMBOL_GPL(pm_trace_rtc_abused);
+
static unsigned int dev_hash_value;
static int set_magic_time(unsigned int user, unsigned int file, unsigned int device)
@@ -104,6 +108,7 @@ static int set_magic_time(unsigned int user, unsigned int file, unsigned int dev
time.tm_min = (n % 20) * 3;
n /= 20;
mc146818_set_time(&time);
+ pm_trace_rtc_abused = true;
return n ? -1 : 0;
}
@@ -239,9 +244,31 @@ int show_trace_dev_match(char *buf, size_t size)
return ret;
}
+static int
+pm_trace_notify(struct notifier_block *nb, unsigned long mode, void *_unused)
+{
+ switch (mode) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ if (pm_trace_rtc_abused) {
+ pm_trace_rtc_abused = false;
+ pr_warn("Possible incorrect RTC due to pm_trace, please use 'ntpdate' or 'rdate' to reset it.\n");
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block pm_trace_nb = {
+ .notifier_call = pm_trace_notify,
+};
+
static int early_resume_init(void)
{
hash_value_early_read = read_magic_time();
+ register_pm_notifier(&pm_trace_nb);
return 0;
}
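
early_resume_init() now registers a PM notifier so the "RTC may have been clobbered by pm_trace" warning is emitted after resume. The notifier interface used here takes a notifier_block whose callback receives the PM event (PM_POST_SUSPEND, PM_POST_HIBERNATION, ...); below is a minimal hedged sketch of the same shape with placeholder names.

/* Sketch: minimal PM notifier, same shape as pm_trace_notify() above. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

static int example_pm_notify(struct notifier_block *nb, unsigned long mode,
			     void *unused)
{
	if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION)
		pr_info("example: back from system sleep\n");
	return NOTIFY_DONE;
}

static struct notifier_block example_pm_nb = {
	.notifier_call = example_pm_notify,
};

static int __init example_init(void)
{
	return register_pm_notifier(&example_pm_nb);
}

static void __exit example_exit(void)
{
	unregister_pm_notifier(&example_pm_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
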
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index df3c97cb4c99..d6ec1c546f5b 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -118,51 +118,19 @@ static int topology_add_dev(unsigned int cpu)
return sysfs_create_group(&dev->kobj, &topology_attr_group);
}
-static void topology_remove_dev(unsigned int cpu)
+static int topology_remove_dev(unsigned int cpu)
{
struct device *dev = get_cpu_device(cpu);
sysfs_remove_group(&dev->kobj, &topology_attr_group);
-}
-
-static int topology_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- int rc = 0;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- rc = topology_add_dev(cpu);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- topology_remove_dev(cpu);
- break;
- }
- return notifier_from_errno(rc);
+ return 0;
}
static int topology_sysfs_init(void)
{
- int cpu;
- int rc = 0;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu) {
- rc = topology_add_dev(cpu);
- if (rc)
- goto out;
- }
- __hotcpu_notifier(topology_cpu_callback, 0);
-
-out:
- cpu_notifier_register_done();
- return rc;
+ return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE,
+ "base/topology:prepare", topology_add_dev,
+ topology_remove_dev);
}
device_initcall(topology_sysfs_init);
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 4b5cd3a7b2b6..12046f4f00e4 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -160,82 +160,56 @@ int zcomp_decompress(struct zcomp_strm *zstrm,
dst, &dst_len);
}
-static int __zcomp_cpu_notifier(struct zcomp *comp,
- unsigned long action, unsigned long cpu)
+int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
{
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
struct zcomp_strm *zstrm;
- switch (action) {
- case CPU_UP_PREPARE:
- if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
- break;
- zstrm = zcomp_strm_alloc(comp);
- if (IS_ERR_OR_NULL(zstrm)) {
- pr_err("Can't allocate a compression stream\n");
- return NOTIFY_BAD;
- }
- *per_cpu_ptr(comp->stream, cpu) = zstrm;
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- zstrm = *per_cpu_ptr(comp->stream, cpu);
- if (!IS_ERR_OR_NULL(zstrm))
- zcomp_strm_free(zstrm);
- *per_cpu_ptr(comp->stream, cpu) = NULL;
- break;
- default:
- break;
+ if (WARN_ON(*per_cpu_ptr(comp->stream, cpu)))
+ return 0;
+
+ zstrm = zcomp_strm_alloc(comp);
+ if (IS_ERR_OR_NULL(zstrm)) {
+ pr_err("Can't allocate a compression stream\n");
+ return -ENOMEM;
}
- return NOTIFY_OK;
+ *per_cpu_ptr(comp->stream, cpu) = zstrm;
+ return 0;
}
-static int zcomp_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
+int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
- unsigned long cpu = (unsigned long)pcpu;
- struct zcomp *comp = container_of(nb, typeof(*comp), notifier);
+ struct zcomp *comp = hlist_entry(node, struct zcomp, node);
+ struct zcomp_strm *zstrm;
- return __zcomp_cpu_notifier(comp, action, cpu);
+ zstrm = *per_cpu_ptr(comp->stream, cpu);
+ if (!IS_ERR_OR_NULL(zstrm))
+ zcomp_strm_free(zstrm);
+ *per_cpu_ptr(comp->stream, cpu) = NULL;
+ return 0;
}
static int zcomp_init(struct zcomp *comp)
{
- unsigned long cpu;
int ret;
- comp->notifier.notifier_call = zcomp_cpu_notifier;
-
comp->stream = alloc_percpu(struct zcomp_strm *);
if (!comp->stream)
return -ENOMEM;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu) {
- ret = __zcomp_cpu_notifier(comp, CPU_UP_PREPARE, cpu);
- if (ret == NOTIFY_BAD)
- goto cleanup;
- }
- __register_cpu_notifier(&comp->notifier);
- cpu_notifier_register_done();
+ ret = cpuhp_state_add_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
+ if (ret < 0)
+ goto cleanup;
return 0;
cleanup:
- for_each_online_cpu(cpu)
- __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
- cpu_notifier_register_done();
- return -ENOMEM;
+ free_percpu(comp->stream);
+ return ret;
}
void zcomp_destroy(struct zcomp *comp)
{
- unsigned long cpu;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
- __unregister_cpu_notifier(&comp->notifier);
- cpu_notifier_register_done();
-
+ cpuhp_state_remove_instance(CPUHP_ZCOMP_PREPARE, &comp->node);
free_percpu(comp->stream);
kfree(comp);
}
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 478cac2ed465..41c1002a7d7d 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -19,11 +19,12 @@ struct zcomp_strm {
/* dynamic per-device compression frontend */
struct zcomp {
struct zcomp_strm * __percpu *stream;
- struct notifier_block notifier;
-
const char *name;
+ struct hlist_node node;
};
+int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
+int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
ssize_t zcomp_available_show(const char *comp, char *buf);
bool zcomp_available_algorithm(const char *comp);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 5497f7fc44d0..15f58ab44d0b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -30,6 +30,7 @@
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
+#include <linux/cpuhotplug.h>
#include "zram_drv.h"
@@ -1443,15 +1444,22 @@ static void destroy_devices(void)
idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
idr_destroy(&zram_index_idr);
unregister_blkdev(zram_major, "zram");
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}
static int __init zram_init(void)
{
int ret;
+ ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
+ zcomp_cpu_up_prepare, zcomp_cpu_dead);
+ if (ret < 0)
+ return ret;
+
ret = class_register(&zram_control_class);
if (ret) {
pr_err("Unable to register zram-control class\n");
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return ret;
}
@@ -1459,6 +1467,7 @@ static int __init zram_init(void)
if (zram_major <= 0) {
pr_err("Unable to get major number\n");
class_unregister(&zram_control_class);
+ cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
return -EBUSY;
}
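
zcomp above uses the multi-instance flavour of the hotplug state machine: the state is set up once with cpuhp_setup_state_multi(), and each device then hooks its hlist_node in with cpuhp_state_add_instance(), so per-CPU streams are allocated and freed per instance as CPUs come and go. The following is a hedged sketch of that shape; the names are placeholders and a dynamic AP state is used here instead of the fixed CPUHP_ZCOMP_PREPARE state the driver reserves.

/* Sketch of the multi-instance hotplug pattern used by zcomp above
 * (placeholder names, dynamic state instead of CPUHP_ZCOMP_PREPARE). */
#include <linux/module.h>
#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/percpu.h>

struct example_dev {
	struct hlist_node node;		/* hooks this instance into the state */
	int __percpu *stream;		/* stand-in for per-CPU worker data */
};

static enum cpuhp_state example_state;

static int example_cpu_up(unsigned int cpu, struct hlist_node *node)
{
	struct example_dev *dev = hlist_entry(node, struct example_dev, node);

	*per_cpu_ptr(dev->stream, cpu) = 1;	/* set up this CPU's data */
	return 0;
}

static int example_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct example_dev *dev = hlist_entry(node, struct example_dev, node);

	*per_cpu_ptr(dev->stream, cpu) = 0;	/* tear it down again */
	return 0;
}

/* Once, e.g. at module init: */
static int __init example_setup(void)
{
	int ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example:online",
					  example_cpu_up, example_cpu_dead);
	if (ret < 0)
		return ret;
	example_state = ret;
	return 0;
}
module_init(example_setup);
MODULE_LICENSE("GPL");

/* Per device instance: runs example_cpu_up() on all online CPUs and keeps
 * the instance registered for future hotplug events. */
static int example_register_dev(struct example_dev *dev)
{
	dev->stream = alloc_percpu(int);
	if (!dev->stream)
		return -ENOMEM;
	return cpuhp_state_add_instance(example_state, &dev->node);
}
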
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index dcc09739a54e..45ba878ae025 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -542,6 +542,7 @@ config HANGCHECK_TIMER
config MMTIMER
tristate "MMTIMER Memory mapped RTC for SGI Altix"
depends on IA64_GENERIC || IA64_SGI_SN2
+ depends on POSIX_TIMERS
default y
help
The mmtimer device allows direct userspace access to the
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 44ce80606944..d1f5bb534e0e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -70,21 +70,17 @@ enum {
* until we have 4 bytes, thus returning a u32 at a time,
* instead of the current u8-at-a-time.
*
- * Padlock instructions can generate a spurious DNA fault, so
- * we have to call them in the context of irq_ts_save/restore()
+ * Padlock instructions can generate a spurious DNA fault, but the
+ * kernel doesn't use CR0.TS, so this doesn't matter.
*/
static inline u32 xstore(u32 *addr, u32 edx_in)
{
u32 eax_out;
- int ts_state;
-
- ts_state = irq_ts_save();
asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
- irq_ts_restore(ts_state);
return eax_out;
}
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 73c487da6d2a..02fef6830e72 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -81,6 +81,7 @@ static struct clock_event_device __percpu *arch_timer_evt;
static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
+static bool arch_counter_suspend_stop;
static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
@@ -576,7 +577,7 @@ static struct clocksource clocksource_counter = {
.rating = 400,
.read = arch_counter_read,
.mask = CLOCKSOURCE_MASK(56),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct cyclecounter cyclecounter = {
@@ -616,6 +617,8 @@ static void __init arch_counter_register(unsigned type)
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
+ if (!arch_counter_suspend_stop)
+ clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
start_count = arch_timer_read_counter();
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
cyclecounter.mult = clocksource_counter.mult;
@@ -907,6 +910,10 @@ static int __init arch_timer_of_init(struct device_node *np)
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
arch_timer_uses_ppi = PHYS_SECURE_PPI;
+ /* On some systems, the counter stops ticking when in suspend. */
+ arch_counter_suspend_stop = of_property_read_bool(np,
+ "arm,no-tick-in-suspend");
+
return arch_timer_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
@@ -964,8 +971,9 @@ static int __init arch_timer_mem_init(struct device_node *np)
}
ret = -ENXIO;
- base = arch_counter_base = of_iomap(best_frame, 0);
- if (!base) {
+ base = arch_counter_base = of_io_request_and_map(best_frame, 0,
+ "arch_mem_timer");
+ if (IS_ERR(base)) {
pr_err("arch_timer: Can't map frame's registers\n");
goto out;
}
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index e71acf231c89..f2f29d2be1cf 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -96,7 +96,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = of_property_read_u32(node, "clock-frequency", &freq);
if (ret) {
pr_err("Can't read clock-frequency");
- return ret;
+ goto err_iounmap;
}
system_clock = base + REG_COUNTER_LO;
@@ -108,13 +108,15 @@ static int __init bcm2835_timer_init(struct device_node *node)
irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
if (irq <= 0) {
pr_err("Can't parse IRQ");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
timer = kzalloc(sizeof(*timer), GFP_KERNEL);
if (!timer) {
pr_err("Can't allocate timer struct\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_iounmap;
}
timer->control = base + REG_CONTROL;
@@ -133,7 +135,7 @@ static int __init bcm2835_timer_init(struct device_node *node)
ret = setup_irq(irq, &timer->act);
if (ret) {
pr_err("Can't set up timer IRQ\n");
- return ret;
+ goto err_iounmap;
}
clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
@@ -141,6 +143,10 @@ static int __init bcm2835_timer_init(struct device_node *node)
pr_info("bcm2835: system timer (irq = %d)\n", irq);
return 0;
+
+err_iounmap:
+ iounmap(base);
+ return ret;
}
CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
bcm2835_timer_init);
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de1cea5..35f71825b7f3 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -6,6 +6,7 @@ config X86_INTEL_PSTATE
bool "Intel P state control"
depends on X86
select ACPI_PROCESSOR if ACPI
+ select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
help
This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index a7f597a476f5..12e1f3cdfb09 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -46,6 +46,7 @@
#ifdef CONFIG_ACPI
#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
#endif
#define FRAC_BITS 8
@@ -399,14 +400,67 @@ static bool intel_pstate_get_ppc_enable_status(void)
return acpi_ppc;
}
+#ifdef CONFIG_ACPI_CPPC_LIB
+
+/* The work item is needed to avoid CPU hotplug locking issues */
+static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
+{
+ sched_set_itmt_support();
+}
+
+static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
+
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+ struct cppc_perf_caps cppc_perf;
+ static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
+ int ret;
+
+ ret = cppc_get_perf_caps(cpu, &cppc_perf);
+ if (ret)
+ return;
+
+ /*
+ * The priorities can be set regardless of whether or not
+ * sched_set_itmt_support(true) has been called and it is valid to
+ * update them at any time after it has been called.
+ */
+ sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
+
+ if (max_highest_perf <= min_highest_perf) {
+ if (cppc_perf.highest_perf > max_highest_perf)
+ max_highest_perf = cppc_perf.highest_perf;
+
+ if (cppc_perf.highest_perf < min_highest_perf)
+ min_highest_perf = cppc_perf.highest_perf;
+
+ if (max_highest_perf > min_highest_perf) {
+ /*
+ * This code can be run during CPU online under the
+ * CPU hotplug locks, so sched_set_itmt_support()
+ * cannot be called from here. Queue up a work item
+ * to invoke it.
+ */
+ schedule_work(&sched_itmt_work);
+ }
+ }
+}
+#else
+static void intel_pstate_set_itmt_prio(int cpu)
+{
+}
+#endif
+
static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
struct cpudata *cpu;
int ret;
int i;
- if (hwp_active)
+ if (hwp_active) {
+ intel_pstate_set_itmt_prio(policy->cpu);
return;
+ }
if (!intel_pstate_get_ppc_enable_status())
return;
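
The ITMT decision above boils down to: record each CPU's CPPC highest_perf, and queue the work item that calls sched_set_itmt_support() the first time two CPUs report different values. A minimal model of that check (not the driver code itself):

#include <linux/kernel.h>

static u32 max_hp;		/* highest value seen so far (starts at 0) */
static u32 min_hp = U32_MAX;	/* lowest value seen so far */

/* returns true on the call that first proves the topology is asymmetric */
static bool note_highest_perf(u32 hp)
{
	if (max_hp > min_hp)
		return false;	/* asymmetry was already detected earlier */

	if (hp > max_hp)
		max_hp = hp;
	if (hp < min_hp)
		min_hp = hp;

	/* e.g. feeding { 0xe0, 0xe0, 0xff } returns true on the third call */
	return max_hp > min_hp;	/* the driver queues sched_itmt_work here */
}
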
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 441e86b23571..b3869748cc6b 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -183,8 +183,8 @@ static inline void padlock_store_cword(struct cword *cword)
/*
* While the padlock instructions don't use FP/SSE registers, they
- * generate a spurious DNA fault when cr0.ts is '1'. These instructions
- * should be used only inside the irq_ts_save/restore() context
+ * generate a spurious DNA fault when CR0.TS is '1'. Fortunately,
+ * the kernel doesn't use CR0.TS.
*/
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
@@ -298,24 +298,18 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
- ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aes_ctx *ctx = aes_ctx(tfm);
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
- ts_state = irq_ts_save();
ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
}
@@ -346,14 +340,12 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->E, &ctx->cword.encrypt,
@@ -361,7 +353,6 @@ static int ecb_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
@@ -375,14 +366,12 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.decrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, &ctx->cword.decrypt,
@@ -390,7 +379,6 @@ static int ecb_aes_decrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.encrypt);
@@ -425,14 +413,12 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
walk.dst.virt.addr, ctx->E,
@@ -442,7 +428,6 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
padlock_store_cword(&ctx->cword.decrypt);
@@ -456,14 +441,12 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
- int ts_state;
padlock_reset_key(&ctx->cword.encrypt);
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- ts_state = irq_ts_save();
while ((nbytes = walk.nbytes)) {
padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
ctx->D, walk.iv, &ctx->cword.decrypt,
@@ -472,8 +455,6 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
err = blkcipher_walk_done(desc, &walk, nbytes);
}
- irq_ts_restore(ts_state);
-
padlock_store_cword(&ctx->cword.encrypt);
return err;
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 8c5f90647b7a..bc72d20c32c3 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -89,7 +89,6 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
struct sha1_state state;
unsigned int space;
unsigned int leftover;
- int ts_state;
int err;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -120,14 +119,11 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
memcpy(result, &state.state, SHA1_DIGEST_SIZE);
- /* prevent taking the spurious DNA fault with padlock. */
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
- irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
@@ -155,7 +151,6 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
struct sha256_state state;
unsigned int space;
unsigned int leftover;
- int ts_state;
int err;
dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -186,14 +181,11 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
memcpy(result, &state.state, SHA256_DIGEST_SIZE);
- /* prevent taking the spurious DNA fault with padlock. */
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
: \
: "c"((unsigned long)state.count + count), \
"a"((unsigned long)state.count), \
"S"(in), "D"(result));
- irq_ts_restore(ts_state);
padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
@@ -312,7 +304,6 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- int ts_state;
partial = sctx->count & 0x3f;
sctx->count += len;
@@ -328,23 +319,19 @@ static int padlock_sha1_update_nano(struct shash_desc *desc,
memcpy(sctx->buffer + partial, data,
done + SHA1_BLOCK_SIZE);
src = sctx->buffer;
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst) \
: "a"((long)-1), "c"((unsigned long)1));
- irq_ts_restore(ts_state);
done += SHA1_BLOCK_SIZE;
src = data + done;
}
/* Process the left bytes from the input data */
if (len - done >= SHA1_BLOCK_SIZE) {
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
- irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
src = data + done;
}
@@ -401,7 +388,6 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
((aligned(STACK_ALIGN)));
u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
- int ts_state;
partial = sctx->count & 0x3f;
sctx->count += len;
@@ -417,23 +403,19 @@ static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
memcpy(sctx->buf + partial, data,
done + SHA256_BLOCK_SIZE);
src = sctx->buf;
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1), "c"((unsigned long)1));
- irq_ts_restore(ts_state);
done += SHA256_BLOCK_SIZE;
src = data + done;
}
/* Process the left bytes from input data */
if (len - done >= SHA256_BLOCK_SIZE) {
- ts_state = irq_ts_save();
asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
: "+S"(src), "+D"(dst)
: "a"((long)-1),
"c"((unsigned long)((len - done) / 64)));
- irq_ts_restore(ts_state);
done += ((len - done) - (len - done) % 64);
src = data + done;
}
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index 48fa4cf9f64a..2f3bbc88ff2a 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -106,8 +106,6 @@
#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */
-/* IOAT1 define left for i7300_idle driver to not fail compiling */
-#define IOAT1_CHANSTS_OFFSET 0x04
#define IOAT_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL)
#define IOAT_CHANSTS_SOFT_ERR 0x10ULL
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index daaac2c79ca7..80762acd8cc8 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -851,7 +851,7 @@ static void decode_mc6_mce(struct mce *m)
/* Decode errors according to Scalable MCA specification */
static void decode_smca_errors(struct mce *m)
{
- struct smca_hwid_mcatype *type;
+ struct smca_hwid *hwid;
unsigned int bank_type;
const char *ip_name;
u8 xec = XEC(m->status, xec_mask);
@@ -862,18 +862,18 @@ static void decode_smca_errors(struct mce *m)
if (boot_cpu_data.x86 >= 0x17 && m->bank == 4)
pr_emerg(HW_ERR "Bank 4 is reserved on Fam17h.\n");
- type = smca_banks[m->bank].type;
- if (!type)
+ hwid = smca_banks[m->bank].hwid;
+ if (!hwid)
return;
- bank_type = type->bank_type;
- ip_name = smca_bank_names[bank_type].long_name;
+ bank_type = hwid->bank_type;
+ ip_name = smca_get_long_name(bank_type);
pr_emerg(HW_ERR "%s Extended Error Code: %d\n", ip_name, xec);
/* Only print the decode of valid error codes */
if (xec < smca_mce_descs[bank_type].num_descs &&
- (type->xec_bitmap & BIT_ULL(xec))) {
+ (hwid->xec_bitmap & BIT_ULL(xec))) {
pr_emerg(HW_ERR "%s Error: ", ip_name);
pr_cont("%s.\n", smca_mce_descs[bank_type].descs[xec]);
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index c981be17d3c0..2e78b0b96d74 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -129,7 +129,25 @@ config EFI_TEST
Say Y here to enable the runtime services support via /dev/efi_test.
If unsure, say N.
+config APPLE_PROPERTIES
+ bool "Apple Device Properties"
+ depends on EFI_STUB && X86
+ select EFI_DEV_PATH_PARSER
+ select UCS2_STRING
+ help
+ Retrieve properties from EFI on Apple Macs and assign them to
+ devices, allowing for improved support of Apple hardware.
+ Properties that would otherwise be missing include the
+ Thunderbolt Device ROM and GPU configuration data.
+
+ If unsure, say Y if you have a Mac. Otherwise N.
+
endmenu
config UEFI_CPER
bool
+
+config EFI_DEV_PATH_PARSER
+ bool
+ depends on ACPI
+ default n
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index c8a439f6d715..ad67342313ed 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_EFI_STUB) += libstub/
obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o
obj-$(CONFIG_EFI_TEST) += test/
+obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o
+obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o
arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c
new file mode 100644
index 000000000000..c473f4c5ca34
--- /dev/null
+++ b/drivers/firmware/efi/apple-properties.c
@@ -0,0 +1,248 @@
+/*
+ * apple-properties.c - EFI device properties on Macs
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "apple-properties: " fmt
+
+#include <linux/bootmem.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+#include <asm/setup.h>
+
+static bool dump_properties __initdata;
+
+static int __init dump_properties_enable(char *arg)
+{
+ dump_properties = true;
+ return 0;
+}
+
+__setup("dump_apple_properties", dump_properties_enable);
+
+struct dev_header {
+ u32 len;
+ u32 prop_count;
+ struct efi_dev_path path[0];
+ /*
+ * followed by key/value pairs, each key and value preceded by u32 len,
+ * len includes itself, value may be empty (in which case its len is 4)
+ */
+};
+
+struct properties_header {
+ u32 len;
+ u32 version;
+ u32 dev_count;
+ struct dev_header dev_header[0];
+};
+
+static u8 one __initdata = 1;
+
+static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
+ struct device *dev, void *ptr,
+ struct property_entry entry[])
+{
+ int i;
+
+ for (i = 0; i < dev_header->prop_count; i++) {
+ int remaining = dev_header->len - (ptr - (void *)dev_header);
+ u32 key_len, val_len;
+ char *key;
+
+ if (sizeof(key_len) > remaining)
+ break;
+
+ key_len = *(typeof(key_len) *)ptr;
+ if (key_len + sizeof(val_len) > remaining ||
+ key_len < sizeof(key_len) + sizeof(efi_char16_t) ||
+ *(efi_char16_t *)(ptr + sizeof(key_len)) == 0) {
+ dev_err(dev, "invalid property name len at %#zx\n",
+ ptr - (void *)dev_header);
+ break;
+ }
+
+ val_len = *(typeof(val_len) *)(ptr + key_len);
+ if (key_len + val_len > remaining ||
+ val_len < sizeof(val_len)) {
+ dev_err(dev, "invalid property val len at %#zx\n",
+ ptr - (void *)dev_header + key_len);
+ break;
+ }
+
+ /* 4 bytes to accommodate UTF-8 code points + null byte */
+ key = kzalloc((key_len - sizeof(key_len)) * 4 + 1, GFP_KERNEL);
+ if (!key) {
+ dev_err(dev, "cannot allocate property name\n");
+ break;
+ }
+ ucs2_as_utf8(key, ptr + sizeof(key_len),
+ key_len - sizeof(key_len));
+
+ entry[i].name = key;
+ entry[i].is_array = true;
+ entry[i].length = val_len - sizeof(val_len);
+ entry[i].pointer.raw_data = ptr + key_len + sizeof(val_len);
+ if (!entry[i].length) {
+ /* driver core doesn't accept empty properties */
+ entry[i].length = 1;
+ entry[i].pointer.raw_data = &one;
+ }
+
+ if (dump_properties) {
+ dev_info(dev, "property: %s\n", entry[i].name);
+ print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, entry[i].pointer.raw_data,
+ entry[i].length, true);
+ }
+
+ ptr += key_len + val_len;
+ }
+
+ if (i != dev_header->prop_count) {
+ dev_err(dev, "got %d device properties, expected %u\n", i,
+ dev_header->prop_count);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ return;
+ }
+
+ dev_info(dev, "assigning %d device properties\n", i);
+}
+
+static int __init unmarshal_devices(struct properties_header *properties)
+{
+ size_t offset = offsetof(struct properties_header, dev_header[0]);
+
+ while (offset + sizeof(struct dev_header) < properties->len) {
+ struct dev_header *dev_header = (void *)properties + offset;
+ struct property_entry *entry = NULL;
+ struct device *dev;
+ size_t len;
+ int ret, i;
+ void *ptr;
+
+ if (offset + dev_header->len > properties->len ||
+ dev_header->len <= sizeof(*dev_header)) {
+ pr_err("invalid len in dev_header at %#zx\n", offset);
+ return -EINVAL;
+ }
+
+ ptr = dev_header->path;
+ len = dev_header->len - sizeof(*dev_header);
+
+ dev = efi_get_device_by_path((struct efi_dev_path **)&ptr, &len);
+ if (IS_ERR(dev)) {
+ pr_err("device path parse error %ld at %#zx:\n",
+ PTR_ERR(dev), ptr - (void *)dev_header);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, dev_header, dev_header->len, true);
+ dev = NULL;
+ goto skip_device;
+ }
+
+ entry = kcalloc(dev_header->prop_count + 1, sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry) {
+ dev_err(dev, "cannot allocate properties\n");
+ goto skip_device;
+ }
+
+ unmarshal_key_value_pairs(dev_header, dev, ptr, entry);
+ if (!entry[0].name)
+ goto skip_device;
+
+ ret = device_add_properties(dev, entry); /* makes deep copy */
+ if (ret)
+ dev_err(dev, "error %d assigning properties\n", ret);
+
+ for (i = 0; entry[i].name; i++)
+ kfree(entry[i].name);
+
+skip_device:
+ kfree(entry);
+ put_device(dev);
+ offset += dev_header->len;
+ }
+
+ return 0;
+}
+
+static int __init map_properties(void)
+{
+ struct properties_header *properties;
+ struct setup_data *data;
+ u32 data_len;
+ u64 pa_data;
+ int ret;
+
+ if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.") &&
+ !dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc."))
+ return 0;
+
+ pa_data = boot_params.hdr.setup_data;
+ while (pa_data) {
+ data = ioremap(pa_data, sizeof(*data));
+ if (!data) {
+ pr_err("cannot map setup_data header\n");
+ return -ENOMEM;
+ }
+
+ if (data->type != SETUP_APPLE_PROPERTIES) {
+ pa_data = data->next;
+ iounmap(data);
+ continue;
+ }
+
+ data_len = data->len;
+ iounmap(data);
+
+ data = ioremap(pa_data, sizeof(*data) + data_len);
+ if (!data) {
+ pr_err("cannot map setup_data payload\n");
+ return -ENOMEM;
+ }
+
+ properties = (struct properties_header *)data->data;
+ if (properties->version != 1) {
+ pr_err("unsupported version:\n");
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -ENOTSUPP;
+ } else if (properties->len != data_len) {
+ pr_err("length mismatch, expected %u\n", data_len);
+ print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET,
+ 16, 1, properties, data_len, true);
+ ret = -EINVAL;
+ } else
+ ret = unmarshal_devices(properties);
+
+ /*
+ * Can only free the setup_data payload but not its header
+ * to avoid breaking the chain of ->next pointers.
+ */
+ data->len = 0;
+ iounmap(data);
+ free_bootmem_late(pa_data + sizeof(*data), data_len);
+
+ return ret;
+ }
+ return 0;
+}
+
+fs_initcall(map_properties);
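
Properties assigned by the code above become ordinary device properties, so a consumer reads them back through the generic device property API. A hypothetical sketch, assuming a property named "device_type" (not something this patch defines):

#include <linux/device.h>
#include <linux/property.h>

/* "device_type" is a made-up property name used only for illustration */
static int demo_read_apple_prop(struct device *dev, u8 *buf, size_t len)
{
	if (!device_property_present(dev, "device_type"))
		return -ENODEV;

	/* values land in @buf exactly as stored by the EFI property table */
	return device_property_read_u8_array(dev, "device_type", buf, len);
}
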
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index 8efe13075c92..f853ad2c4ca0 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -244,8 +244,10 @@ void __init efi_init(void)
"Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
efi.memmap.desc_version);
- if (uefi_init() < 0)
+ if (uefi_init() < 0) {
+ efi_memmap_unmap();
return;
+ }
reserve_regions();
efi_memattr_init();
diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c
new file mode 100644
index 000000000000..85d1834ee9b7
--- /dev/null
+++ b/drivers/firmware/efi/dev-path-parser.c
@@ -0,0 +1,203 @@
+/*
+ * dev-path-parser.c - EFI Device Path parser
+ * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/pci.h>
+
+struct acpi_hid_uid {
+ struct acpi_device_id hid[2];
+ char uid[11]; /* UINT_MAX + null byte */
+};
+
+static int __init match_acpi_dev(struct device *dev, void *data)
+{
+ struct acpi_hid_uid hid_uid = *(struct acpi_hid_uid *)data;
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ if (acpi_match_device_ids(adev, hid_uid.hid))
+ return 0;
+
+ if (adev->pnp.unique_id)
+ return !strcmp(adev->pnp.unique_id, hid_uid.uid);
+ else
+ return !strcmp("0", hid_uid.uid);
+}
+
+static long __init parse_acpi_path(struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ struct acpi_hid_uid hid_uid = {};
+ struct device *phys_dev;
+
+ if (node->length != 12)
+ return -EINVAL;
+
+ sprintf(hid_uid.hid[0].id, "%c%c%c%04X",
+ 'A' + ((node->acpi.hid >> 10) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 5) & 0x1f) - 1,
+ 'A' + ((node->acpi.hid >> 0) & 0x1f) - 1,
+ node->acpi.hid >> 16);
+ sprintf(hid_uid.uid, "%u", node->acpi.uid);
+
+ *child = bus_find_device(&acpi_bus_type, NULL, &hid_uid,
+ match_acpi_dev);
+ if (!*child)
+ return -ENODEV;
+
+ phys_dev = acpi_get_first_physical_node(to_acpi_device(*child));
+ if (phys_dev) {
+ get_device(phys_dev);
+ put_device(*child);
+ *child = phys_dev;
+ }
+
+ return 0;
+}
+
+static int __init match_pci_dev(struct device *dev, void *data)
+{
+ unsigned int devfn = *(unsigned int *)data;
+
+ return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn;
+}
+
+static long __init parse_pci_path(struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ unsigned int devfn;
+
+ if (node->length != 6)
+ return -EINVAL;
+ if (!parent)
+ return -EINVAL;
+
+ devfn = PCI_DEVFN(node->pci.dev, node->pci.fn);
+
+ *child = device_find_child(parent, &devfn, match_pci_dev);
+ if (!*child)
+ return -ENODEV;
+
+ return 0;
+}
+
+/*
+ * Insert parsers for further node types here.
+ *
+ * Each parser takes a pointer to the @node and to the @parent (will be NULL
+ * for the first device path node). If a device corresponding to @node was
+ * found below @parent, its reference count should be incremented and the
+ * device returned in @child.
+ *
+ * The return value should be 0 on success or a negative int on failure.
+ * The special return values 0x01 (EFI_DEV_END_INSTANCE) and 0xFF
+ * (EFI_DEV_END_ENTIRE) signal the end of the device path; only
+ * parse_end_path() is supposed to return these.
+ *
+ * Be sure to validate the node length and contents before commencing the
+ * search for a device.
+ */
+
+static long __init parse_end_path(struct efi_dev_path *node,
+ struct device *parent, struct device **child)
+{
+ if (node->length != 4)
+ return -EINVAL;
+ if (node->sub_type != EFI_DEV_END_INSTANCE &&
+ node->sub_type != EFI_DEV_END_ENTIRE)
+ return -EINVAL;
+ if (!parent)
+ return -ENODEV;
+
+ *child = get_device(parent);
+ return node->sub_type;
+}
+
+/**
+ * efi_get_device_by_path - find device by EFI Device Path
+ * @node: EFI Device Path
+ * @len: maximum length of EFI Device Path in bytes
+ *
+ * Parse a series of EFI Device Path nodes at @node and find the corresponding
+ * device. If the device was found, its reference count is incremented and a
+ * pointer to it is returned. The caller needs to drop the reference with
+ * put_device() after use. The @node pointer is updated to point to the
+ * location immediately after the "End of Hardware Device Path" node.
+ *
+ * If another Device Path instance follows, @len is decremented by the number
+ * of bytes consumed. Otherwise @len is set to %0.
+ *
+ * If a Device Path node is malformed or its corresponding device is not found,
+ * @node is updated to point to this offending node and an ERR_PTR is returned.
+ *
+ * If @len is initially %0, the function returns %NULL. Thus, to iterate over
+ * all instances in a path, the following idiom may be used:
+ *
+ * while (!IS_ERR_OR_NULL(dev = efi_get_device_by_path(&node, &len))) {
+ * // do something with dev
+ * put_device(dev);
+ * }
+ * if (IS_ERR(dev))
+ * // report error
+ *
+ * Devices can only be found if they're already instantiated. Most buses
+ * instantiate devices in the "subsys" initcall level, hence the earliest
+ * initcall level in which this function should be called is "fs".
+ *
+ * Returns the device on success or
+ * %ERR_PTR(-ENODEV) if no device was found,
+ * %ERR_PTR(-EINVAL) if a node is malformed or exceeds @len,
+ * %ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented.
+ */
+struct device * __init efi_get_device_by_path(struct efi_dev_path **node,
+ size_t *len)
+{
+ struct device *parent = NULL, *child;
+ long ret = 0;
+
+ if (!*len)
+ return NULL;
+
+ while (!ret) {
+ if (*len < 4 || *len < (*node)->length)
+ ret = -EINVAL;
+ else if ((*node)->type == EFI_DEV_ACPI &&
+ (*node)->sub_type == EFI_DEV_BASIC_ACPI)
+ ret = parse_acpi_path(*node, parent, &child);
+ else if ((*node)->type == EFI_DEV_HW &&
+ (*node)->sub_type == EFI_DEV_PCI)
+ ret = parse_pci_path(*node, parent, &child);
+ else if (((*node)->type == EFI_DEV_END_PATH ||
+ (*node)->type == EFI_DEV_END_PATH2))
+ ret = parse_end_path(*node, parent, &child);
+ else
+ ret = -ENOTSUPP;
+
+ put_device(parent);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ parent = child;
+ *node = (void *)*node + (*node)->length;
+ *len -= (*node)->length;
+ }
+
+ if (ret == EFI_DEV_END_ENTIRE)
+ *len = 0;
+
+ return child;
+}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 1ac199cd75e7..92914801e388 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -23,7 +23,10 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/io.h>
+#include <linux/kexec.h>
#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/ucs2_string.h>
@@ -48,6 +51,7 @@ struct efi __read_mostly efi = {
.esrt = EFI_INVALID_TABLE_ADDR,
.properties_table = EFI_INVALID_TABLE_ADDR,
.mem_attr_table = EFI_INVALID_TABLE_ADDR,
+ .rng_seed = EFI_INVALID_TABLE_ADDR,
};
EXPORT_SYMBOL(efi);
@@ -259,8 +263,10 @@ static __init int efivar_ssdt_load(void)
}
data = kmalloc(size, GFP_KERNEL);
- if (!data)
+ if (!data) {
+ ret = -ENOMEM;
goto free_entry;
+ }
ret = efivar_entry_get(entry, NULL, &size, data);
if (ret) {
@@ -438,6 +444,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
{EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
+ {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
{NULL_GUID, NULL, NULL},
};
@@ -499,6 +506,29 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
pr_cont("\n");
set_bit(EFI_CONFIG_TABLES, &efi.flags);
+ if (efi.rng_seed != EFI_INVALID_TABLE_ADDR) {
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
+ seed = early_memremap(efi.rng_seed, sizeof(*seed));
+ if (seed != NULL) {
+ size = seed->size;
+ early_memunmap(seed, sizeof(*seed));
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = early_memremap(efi.rng_seed,
+ sizeof(*seed) + size);
+ if (seed != NULL) {
+ add_device_randomness(seed->bits, seed->size);
+ early_memunmap(seed, sizeof(*seed) + size);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ }
+
/* Parse the EFI Properties table if it exists */
if (efi.properties_table != EFI_INVALID_TABLE_ADDR) {
efi_properties_table_t *tbl;
@@ -822,3 +852,47 @@ int efi_status_to_err(efi_status_t status)
return err;
}
+
+#ifdef CONFIG_KEXEC
+static int update_efi_random_seed(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ struct linux_efi_random_seed *seed;
+ u32 size = 0;
+
+ if (!kexec_in_progress)
+ return NOTIFY_DONE;
+
+ seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB);
+ if (seed != NULL) {
+ size = min(seed->size, 32U);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ if (size > 0) {
+ seed = memremap(efi.rng_seed, sizeof(*seed) + size,
+ MEMREMAP_WB);
+ if (seed != NULL) {
+ seed->size = size;
+ get_random_bytes(seed->bits, seed->size);
+ memunmap(seed);
+ } else {
+ pr_err("Could not map UEFI random seed!\n");
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block efi_random_seed_nb = {
+ .notifier_call = update_efi_random_seed,
+};
+
+static int register_update_efi_random_seed(void)
+{
+ if (efi.rng_seed == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ return register_reboot_notifier(&efi_random_seed_nb);
+}
+late_initcall(register_update_efi_random_seed);
+#endif
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 5e23e2d305e7..6621b13c370f 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -36,11 +36,11 @@ arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
$(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
-lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
+lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \
$(patsubst %.c,lib-%.o,$(arm-deps))
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64-stub.o random.o
+lib-$(CONFIG_ARM64) += arm64-stub.o
CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
#
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 993aa56755f6..b4f7d78f9e8b 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -340,6 +340,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
if (status != EFI_SUCCESS)
pr_efi_err(sys_table, "Failed initrd from command line!\n");
+ efi_random_get_seed(sys_table);
+
new_fdt_addr = fdt_addr;
status = allocate_new_fdt_and_exit_boot(sys_table, handle,
&new_fdt_addr, dram_base + MAX_FDT_OFFSET,
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index aded10662020..757badc1debb 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -32,15 +32,6 @@
static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
-/*
- * Allow the platform to override the allocation granularity: this allows
- * systems that have the capability to run with a larger page size to deal
- * with the allocations for initrd and fdt more efficiently.
- */
-#ifndef EFI_ALLOC_ALIGN
-#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
-#endif
-
#define EFI_MMAP_NR_SLACK_SLOTS 8
struct file_info {
@@ -186,14 +177,16 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
goto fail;
/*
- * Enforce minimum alignment that EFI requires when requesting
- * a specific address. We are doing page-based allocations,
- * so we must be aligned to a page.
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
*/
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
- nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
again:
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
@@ -208,7 +201,7 @@ again:
continue;
start = desc->phys_addr;
- end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
if (end > max)
end = max;
@@ -278,14 +271,16 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
goto fail;
/*
- * Enforce minimum alignment that EFI requires when requesting
- * a specific address. We are doing page-based allocations,
- * so we must be aligned to a page.
+ * Enforce minimum alignment that EFI or Linux requires when
+ * requesting a specific address. We are doing page-based (or
+ * larger) allocations, and both the address and size must meet
+ * alignment constraints.
*/
if (align < EFI_ALLOC_ALIGN)
align = EFI_ALLOC_ALIGN;
- nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE;
+ size = round_up(size, EFI_ALLOC_ALIGN);
+ nr_pages = size / EFI_PAGE_SIZE;
for (i = 0; i < map_size / desc_size; i++) {
efi_memory_desc_t *desc;
unsigned long m = (unsigned long)map;
@@ -300,7 +295,7 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
continue;
start = desc->phys_addr;
- end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT);
+ end = start + desc->num_pages * EFI_PAGE_SIZE;
/*
* Don't allocate at 0x0. It will confuse code that
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index ee49cd23ee63..b98824e3800a 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -15,6 +15,15 @@
*/
#undef __init
+/*
+ * Allow the platform to override the allocation granularity: this allows
+ * systems that have the capability to run with a larger page size to deal
+ * with the allocations for initrd and fdt more efficiently.
+ */
+#ifndef EFI_ALLOC_ALIGN
+#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
+#endif
+
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
@@ -62,4 +71,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
+efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg);
+
#endif
diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
index 0c9f58c5ba50..7e72954d5860 100644
--- a/drivers/firmware/efi/libstub/random.c
+++ b/drivers/firmware/efi/libstub/random.c
@@ -8,6 +8,7 @@
*/
#include <linux/efi.h>
+#include <linux/log2.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -41,21 +42,23 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg,
*/
static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
unsigned long size,
- unsigned long align)
+ unsigned long align_shift)
{
- u64 start, end;
+ unsigned long align = 1UL << align_shift;
+ u64 first_slot, last_slot, region_end;
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
- start = round_up(md->phys_addr, align);
- end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size,
- align);
+ region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1);
- if (start > end)
+ first_slot = round_up(md->phys_addr, align);
+ last_slot = round_down(region_end - size + 1, align);
+
+ if (first_slot > last_slot)
return 0;
- return (end - start + 1) / align;
+ return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1;
}
/*
@@ -98,7 +101,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
efi_memory_desc_t *md = (void *)memory_map + map_offset;
unsigned long slots;
- slots = get_entry_num_slots(md, size, align);
+ slots = get_entry_num_slots(md, size, ilog2(align));
MD_NUM_SLOTS(md) = slots;
total_slots += slots;
}
@@ -141,3 +144,51 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
return status;
}
+
+#define RANDOM_SEED_SIZE 32
+
+efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg)
+{
+ efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID;
+ efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW;
+ efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID;
+ struct efi_rng_protocol *rng;
+ struct linux_efi_random_seed *seed;
+ efi_status_t status;
+
+ status = efi_call_early(locate_protocol, &rng_proto, NULL,
+ (void **)&rng);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+ sizeof(*seed) + RANDOM_SEED_SIZE,
+ (void **)&seed);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ status = rng->get_rng(rng, &rng_algo_raw, RANDOM_SEED_SIZE,
+ seed->bits);
+ if (status == EFI_UNSUPPORTED)
+ /*
+ * Use whatever algorithm we have available if the raw algorithm
+ * is not implemented.
+ */
+ status = rng->get_rng(rng, NULL, RANDOM_SEED_SIZE,
+ seed->bits);
+
+ if (status != EFI_SUCCESS)
+ goto err_freepool;
+
+ seed->size = RANDOM_SEED_SIZE;
+ status = efi_call_early(install_configuration_table, &rng_table_guid,
+ seed);
+ if (status != EFI_SUCCESS)
+ goto err_freepool;
+
+ return EFI_SUCCESS;
+
+err_freepool:
+ efi_call_early(free_pool, seed);
+ return status;
+}
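
A worked example of the slot arithmetic in get_entry_num_slots() above, with illustrative numbers and assuming the kernel's round_up()/round_down() helpers:

#include <linux/kernel.h>

static unsigned long demo_num_slots(void)
{
	u64 phys = 0x1000, pages = 16, size = 0x4000;	/* 64 KiB region, 16 KiB alloc */
	unsigned long align_shift = 12;			/* 4 KiB alignment */
	unsigned long align = 1UL << align_shift;
	u64 region_end = phys + pages * 4096 - 1;	/* 0x10fff */
	u64 first_slot = round_up(phys, align);		/* 0x1000 */
	u64 last_slot = round_down(region_end - size + 1, align);	/* 0xd000 */

	/* ((0xd000 - 0x1000) >> 12) + 1 == 13 candidate start addresses */
	return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1;
}
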
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index f61bb52be318..8cd578f62059 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -8,7 +8,6 @@
*
*/
-#include <linux/version.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -156,7 +155,7 @@ static long efi_runtime_get_variable(unsigned long arg)
{
struct efi_getvariable __user *getvariable_user;
struct efi_getvariable getvariable;
- unsigned long datasize, prev_datasize, *dz;
+ unsigned long datasize = 0, prev_datasize, *dz;
efi_guid_t vendor_guid, *vd = NULL;
efi_status_t status;
efi_char16_t *name = NULL;
@@ -266,14 +265,10 @@ static long efi_runtime_set_variable(unsigned long arg)
return rv;
}
- data = kmalloc(setvariable.data_size, GFP_KERNEL);
- if (!data) {
+ data = memdup_user(setvariable.data, setvariable.data_size);
+ if (IS_ERR(data)) {
kfree(name);
- return -ENOMEM;
- }
- if (copy_from_user(data, setvariable.data, setvariable.data_size)) {
- rv = -EFAULT;
- goto out;
+ return PTR_ERR(data);
}
status = efi.set_variable(name, &vendor_guid,
@@ -429,7 +424,7 @@ static long efi_runtime_get_nextvariablename(unsigned long arg)
efi_guid_t *vd = NULL;
efi_guid_t vendor_guid;
efi_char16_t *name = NULL;
- int rv;
+ int rv = 0;
getnextvariablename_user = (struct efi_getnextvariablename __user *)arg;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 57194471f8cd..b8f403faadbb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -972,7 +972,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
if (busywait_stop(timeout_us, cpu))
break;
- cpu_relax_lowlatency();
+ cpu_relax();
} while (!need_resched());
return false;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index a6fc1bdc48af..401006b4c6a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -35,31 +35,22 @@
#include "i915_drv.h"
#include "i915_trace.h"
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
- if (!mutex_is_locked(mutex))
+ switch (mutex_trylock_recursive(&dev->struct_mutex)) {
+ case MUTEX_TRYLOCK_FAILED:
return false;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
-
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
+ case MUTEX_TRYLOCK_SUCCESS:
+ *unlock = true;
+ return true;
+ case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
- } else {
- *unlock = true;
+ return true;
}
- return true;
+ BUG();
}
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 192b2d3a79cb..ab1dd020eb04 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -18,33 +18,24 @@
#include "msm_drv.h"
#include "msm_gem.h"
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
{
- if (!mutex_is_locked(mutex))
+ switch (mutex_trylock_recursive(&dev->struct_mutex)) {
+ case MUTEX_TRYLOCK_FAILED:
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
- return mutex->owner == task;
-#else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
- return false;
-#endif
-}
+ case MUTEX_TRYLOCK_SUCCESS:
+ *unlock = true;
+ return true;
-static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- if (!mutex_trylock(&dev->struct_mutex)) {
- if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return false;
+ case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
- } else {
- *unlock = true;
+ return true;
}
- return true;
+ BUG();
}
-
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
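
The same trylock pattern, reduced to a self-contained sketch: mutex_trylock_recursive() and the MUTEX_TRYLOCK_* results are the helper this series adds to <linux/mutex.h>; the demo_* names and the scan body are illustrative, mirroring how the i915 and msm shrinker callbacks consume the *unlock flag.

#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/shrinker.h>

static bool demo_shrinker_lock(struct mutex *lock, bool *unlock)
{
	switch (mutex_trylock_recursive(lock)) {
	case MUTEX_TRYLOCK_FAILED:
		return false;		/* held by another task: skip this pass */
	case MUTEX_TRYLOCK_SUCCESS:
		*unlock = true;		/* we took the lock and must release it */
		return true;
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;	/* already held further up our call stack */
		return true;
	}
	BUG();
}

static unsigned long demo_scan(struct mutex *struct_mutex)
{
	unsigned long freed = 0;
	bool unlock;

	if (!demo_shrinker_lock(struct_mutex, &unlock))
		return SHRINK_STOP;

	/* ... walk the LRU and reclaim objects under struct_mutex ... */

	if (unlock)
		mutex_unlock(struct_mutex);

	return freed;
}
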
diff --git a/drivers/idle/Kconfig b/drivers/idle/Kconfig
index 4732dfc15447..55bcf803841e 100644
--- a/drivers/idle/Kconfig
+++ b/drivers/idle/Kconfig
@@ -8,20 +8,3 @@ config INTEL_IDLE
native Intel hardware idle features. The acpi_idle driver
can be configured at the same time, in order to handle
processors intel_idle does not support.
-
-menu "Memory power savings"
-depends on X86_64
-
-config I7300_IDLE_IOAT_CHANNEL
- bool
-
-config I7300_IDLE
- tristate "Intel chipset idle memory power saving driver"
- select I7300_IDLE_IOAT_CHANNEL
- help
- Enable memory power savings when idle with certain Intel server
- chipsets. The chipset must have I/O AT support, such as the
- Intel 7300. The power savings depends on the type and quantity of
- DRAM devices.
-
-endmenu
diff --git a/drivers/idle/Makefile b/drivers/idle/Makefile
index 23d295cf10f2..0007111d73e9 100644
--- a/drivers/idle/Makefile
+++ b/drivers/idle/Makefile
@@ -1,3 +1,2 @@
-obj-$(CONFIG_I7300_IDLE) += i7300_idle.o
obj-$(CONFIG_INTEL_IDLE) += intel_idle.o
diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c
deleted file mode 100644
index ffeebc7e9f1c..000000000000
--- a/drivers/idle/i7300_idle.c
+++ /dev/null
@@ -1,612 +0,0 @@
-/*
- * (C) Copyright 2008 Intel Corporation
- * Authors:
- * Andy Henroid <andrew.d.henroid@intel.com>
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- */
-
-/*
- * Save DIMM power on Intel 7300-based platforms when all CPUs/cores
- * are idle, using the DIMM thermal throttling capability.
- *
- * This driver depends on the Intel integrated DMA controller (I/O AT).
- * If the driver for I/O AT (drivers/dma/ioatdma*) is also enabled,
- * this driver should work cooperatively.
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/sched.h>
-#include <linux/notifier.h>
-#include <linux/cpumask.h>
-#include <linux/ktime.h>
-#include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/stop_machine.h>
-#include <linux/i7300_idle.h>
-
-#include <asm/idle.h>
-
-#include "../dma/ioat/hw.h"
-#include "../dma/ioat/registers.h"
-
-#define I7300_IDLE_DRIVER_VERSION "1.55"
-#define I7300_PRINT "i7300_idle:"
-
-#define MAX_STOP_RETRIES 10
-
-static int debug;
-module_param_named(debug, debug, uint, 0644);
-MODULE_PARM_DESC(debug, "Enable debug printks in this driver");
-
-static int forceload;
-module_param_named(forceload, forceload, uint, 0644);
-MODULE_PARM_DESC(debug, "Enable driver testing on unvalidated i5000");
-
-#define dprintk(fmt, arg...) \
- do { if (debug) printk(KERN_INFO I7300_PRINT fmt, ##arg); } while (0)
-
-/*
- * Value to set THRTLOW to when initiating throttling
- * 0 = No throttling
- * 1 = Throttle when > 4 activations per eval window (Maximum throttling)
- * 2 = Throttle when > 8 activations
- * 168 = Throttle when > 672 activations (Minimum throttling)
- */
-#define MAX_THROTTLE_LOW_LIMIT 168
-static uint throttle_low_limit = 1;
-module_param_named(throttle_low_limit, throttle_low_limit, uint, 0644);
-MODULE_PARM_DESC(throttle_low_limit,
- "Value for THRTLOWLM activation field "
- "(0 = disable throttle, 1 = Max throttle, 168 = Min throttle)");
-
-/*
- * simple invocation and duration statistics
- */
-static unsigned long total_starts;
-static unsigned long total_us;
-
-#ifdef DEBUG
-static unsigned long past_skip;
-#endif
-
-static struct pci_dev *fbd_dev;
-
-static raw_spinlock_t i7300_idle_lock;
-static int i7300_idle_active;
-
-static u8 i7300_idle_thrtctl_saved;
-static u8 i7300_idle_thrtlow_saved;
-static u32 i7300_idle_mc_saved;
-
-static cpumask_var_t idle_cpumask;
-static ktime_t start_ktime;
-static unsigned long avg_idle_us;
-
-static struct dentry *debugfs_dir;
-
-/* Begin: I/O AT Helper routines */
-
-#define IOAT_CHANBASE(ioat_ctl, chan) (ioat_ctl + 0x80 + 0x80 * chan)
-/* Snoop control (disable snoops when coherency is not important) */
-#define IOAT_DESC_SADDR_SNP_CTL (1UL << 1)
-#define IOAT_DESC_DADDR_SNP_CTL (1UL << 2)
-
-static struct pci_dev *ioat_dev;
-static struct ioat_dma_descriptor *ioat_desc; /* I/O AT desc & data (1 page) */
-static unsigned long ioat_desc_phys;
-static u8 *ioat_iomap; /* I/O AT memory-mapped control regs (aka CB_BAR) */
-static u8 *ioat_chanbase;
-
-/* Start I/O AT memory copy */
-static int i7300_idle_ioat_start(void)
-{
- u32 err;
- /* Clear error (due to circular descriptor pointer) */
- err = readl(ioat_chanbase + IOAT_CHANERR_OFFSET);
- if (err)
- writel(err, ioat_chanbase + IOAT_CHANERR_OFFSET);
-
- writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
- return 0;
-}
-
-/* Stop I/O AT memory copy */
-static void i7300_idle_ioat_stop(void)
-{
- int i;
- u64 sts;
-
- for (i = 0; i < MAX_STOP_RETRIES; i++) {
- writeb(IOAT_CHANCMD_RESET,
- ioat_chanbase + IOAT1_CHANCMD_OFFSET);
-
- udelay(10);
-
- sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- if (sts != IOAT_CHANSTS_ACTIVE)
- break;
-
- }
-
- if (i == MAX_STOP_RETRIES) {
- dprintk("failed to stop I/O AT after %d retries\n",
- MAX_STOP_RETRIES);
- }
-}
-
-/* Test I/O AT by copying 1024 byte from 2k to 1k */
-static int __init i7300_idle_ioat_selftest(u8 *ctl,
- struct ioat_dma_descriptor *desc, unsigned long desc_phys)
-{
- u64 chan_sts;
-
- memset(desc, 0, 2048);
- memset((u8 *) desc + 2048, 0xab, 1024);
-
- desc[0].size = 1024;
- desc[0].ctl = 0;
- desc[0].src_addr = desc_phys + 2048;
- desc[0].dst_addr = desc_phys + 1024;
- desc[0].next = 0;
-
- writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
- writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET);
-
- udelay(1000);
-
- chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- if (chan_sts != IOAT_CHANSTS_DONE) {
- /* Not complete, reset the channel */
- writeb(IOAT_CHANCMD_RESET,
- ioat_chanbase + IOAT1_CHANCMD_OFFSET);
- return -1;
- }
-
- if (*(u32 *) ((u8 *) desc + 3068) != 0xabababab ||
- *(u32 *) ((u8 *) desc + 2044) != 0xabababab) {
- dprintk("Data values src 0x%x, dest 0x%x, memset 0x%x\n",
- *(u32 *) ((u8 *) desc + 2048),
- *(u32 *) ((u8 *) desc + 1024),
- *(u32 *) ((u8 *) desc + 3072));
- return -1;
- }
- return 0;
-}
-
-static struct device dummy_dma_dev = {
- .init_name = "fallback device",
- .coherent_dma_mask = DMA_BIT_MASK(64),
- .dma_mask = &dummy_dma_dev.coherent_dma_mask,
-};
-
-/* Setup and initialize I/O AT */
-/* This driver needs I/O AT as the throttling takes effect only when there is
- * some memory activity. We use I/O AT to set up a dummy copy, while all CPUs
- * go idle and memory is throttled.
- */
-static int __init i7300_idle_ioat_init(void)
-{
- u8 ver, chan_count, ioat_chan;
- u16 chan_ctl;
-
- ioat_iomap = (u8 *) ioremap_nocache(pci_resource_start(ioat_dev, 0),
- pci_resource_len(ioat_dev, 0));
-
- if (!ioat_iomap) {
- printk(KERN_ERR I7300_PRINT "failed to map I/O AT registers\n");
- goto err_ret;
- }
-
- ver = readb(ioat_iomap + IOAT_VER_OFFSET);
- if (ver != IOAT_VER_1_2) {
- printk(KERN_ERR I7300_PRINT "unknown I/O AT version (%u.%u)\n",
- ver >> 4, ver & 0xf);
- goto err_unmap;
- }
-
- chan_count = readb(ioat_iomap + IOAT_CHANCNT_OFFSET);
- if (!chan_count) {
- printk(KERN_ERR I7300_PRINT "unexpected # of I/O AT channels "
- "(%u)\n",
- chan_count);
- goto err_unmap;
- }
-
- ioat_chan = chan_count - 1;
- ioat_chanbase = IOAT_CHANBASE(ioat_iomap, ioat_chan);
-
- chan_ctl = readw(ioat_chanbase + IOAT_CHANCTRL_OFFSET);
- if (chan_ctl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
- printk(KERN_ERR I7300_PRINT "channel %d in use\n", ioat_chan);
- goto err_unmap;
- }
-
- writew(IOAT_CHANCTRL_CHANNEL_IN_USE,
- ioat_chanbase + IOAT_CHANCTRL_OFFSET);
-
- ioat_desc = (struct ioat_dma_descriptor *)dma_alloc_coherent(
- &dummy_dma_dev, 4096,
- (dma_addr_t *)&ioat_desc_phys, GFP_KERNEL);
- if (!ioat_desc) {
- printk(KERN_ERR I7300_PRINT "failed to allocate I/O AT desc\n");
- goto err_mark_unused;
- }
-
- writel(ioat_desc_phys & 0xffffffffUL,
- ioat_chanbase + IOAT1_CHAINADDR_OFFSET_LOW);
- writel(ioat_desc_phys >> 32,
- ioat_chanbase + IOAT1_CHAINADDR_OFFSET_HIGH);
-
- if (i7300_idle_ioat_selftest(ioat_iomap, ioat_desc, ioat_desc_phys)) {
- printk(KERN_ERR I7300_PRINT "I/O AT self-test failed\n");
- goto err_free;
- }
-
- /* Setup circular I/O AT descriptor chain */
- ioat_desc[0].ctl = IOAT_DESC_SADDR_SNP_CTL | IOAT_DESC_DADDR_SNP_CTL;
- ioat_desc[0].src_addr = ioat_desc_phys + 2048;
- ioat_desc[0].dst_addr = ioat_desc_phys + 3072;
- ioat_desc[0].size = 128;
- ioat_desc[0].next = ioat_desc_phys + sizeof(struct ioat_dma_descriptor);
-
- ioat_desc[1].ctl = ioat_desc[0].ctl;
- ioat_desc[1].src_addr = ioat_desc[0].src_addr;
- ioat_desc[1].dst_addr = ioat_desc[0].dst_addr;
- ioat_desc[1].size = ioat_desc[0].size;
- ioat_desc[1].next = ioat_desc_phys;
-
- return 0;
-
-err_free:
- dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
-err_mark_unused:
- writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
-err_unmap:
- iounmap(ioat_iomap);
-err_ret:
- return -ENODEV;
-}
-
-/* Cleanup I/O AT */
-static void __exit i7300_idle_ioat_exit(void)
-{
- int i;
- u64 chan_sts;
-
- i7300_idle_ioat_stop();
-
- /* Wait for a while for the channel to halt before releasing */
- for (i = 0; i < MAX_STOP_RETRIES; i++) {
- writeb(IOAT_CHANCMD_RESET,
- ioat_chanbase + IOAT1_CHANCMD_OFFSET);
-
- chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- if (chan_sts != IOAT_CHANSTS_ACTIVE) {
- writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET);
- break;
- }
- udelay(1000);
- }
-
- chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) &
- IOAT_CHANSTS_STATUS;
-
- /*
- * We tried to reset multiple times. If IO A/T channel is still active
- * flag an error and return without cleanup. Memory leak is better
- * than random corruption in that extreme error situation.
- */
- if (chan_sts == IOAT_CHANSTS_ACTIVE) {
- printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels."
- " Not freeing resources\n");
- return;
- }
-
- dma_free_coherent(&dummy_dma_dev, 4096, (void *)ioat_desc, 0);
- iounmap(ioat_iomap);
-}
-
-/* End: I/O AT Helper routines */
-
-#define DIMM_THRTLOW 0x64
-#define DIMM_THRTCTL 0x67
-#define DIMM_THRTCTL_THRMHUNT (1UL << 0)
-#define DIMM_MC 0x40
-#define DIMM_GTW_MODE (1UL << 17)
-#define DIMM_GBLACT 0x60
-
-/*
- * Keep track of an exponential-decaying average of recent idle durations.
- * The latest duration gets DURATION_WEIGHT_PCT percentage weight
- * in this average, with the old average getting the remaining weight.
- *
- * High weights emphasize recent history, low weights include long history.
- */
-#define DURATION_WEIGHT_PCT 55
-
-/*
- * When the decaying average of recent durations or the predicted duration
- * of the next timer interrupt is shorter than duration_threshold, the
- * driver will decline to throttle.
- */
-#define DURATION_THRESHOLD_US 100
-
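For reference, the weighted update the removed driver applies later (in i7300_idle_notifier) reduces to avg = ((100 - DURATION_WEIGHT_PCT) * avg + DURATION_WEIGHT_PCT * latest) / 100. A minimal arithmetic sketch, with the helper name made up for illustration:

/* With a weight of 55%: an old average of 200 us and a new idle period
 * of 40 us decay to (45 * 200 + 55 * 40) / 100 = 112 us. */
static unsigned long decay_avg_us(unsigned long avg_us, unsigned long latest_us)
{
	return ((100 - 55) * avg_us + 55 * latest_us) / 100;
}
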
-
-/* Store DIMM thermal throttle configuration */
-static int i7300_idle_thrt_save(void)
-{
- u32 new_mc_val;
- u8 gblactlm;
-
- pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &i7300_idle_thrtctl_saved);
- pci_read_config_byte(fbd_dev, DIMM_THRTLOW, &i7300_idle_thrtlow_saved);
- pci_read_config_dword(fbd_dev, DIMM_MC, &i7300_idle_mc_saved);
- /*
- * Make sure we have Global Throttling Window Mode set to have a
- * "short" window. This (mostly) works around an issue where
- * throttling persists until the end of the global throttling window
- * size. On the tested system, this was resulting in a maximum of
- * 64 ms to exit throttling (average 32 ms). The actual numbers
- * depends on system frequencies. Setting the short window reduces
- * this by a factor of 4096.
- *
- * We will only do this only if the system is set for
- * unlimited-activations while in open-loop throttling (i.e., when
- * Global Activation Throttle Limit is zero).
- */
- pci_read_config_byte(fbd_dev, DIMM_GBLACT, &gblactlm);
- dprintk("thrtctl_saved = 0x%02x, thrtlow_saved = 0x%02x\n",
- i7300_idle_thrtctl_saved,
- i7300_idle_thrtlow_saved);
- dprintk("mc_saved = 0x%08x, gblactlm = 0x%02x\n",
- i7300_idle_mc_saved,
- gblactlm);
- if (gblactlm == 0) {
- new_mc_val = i7300_idle_mc_saved | DIMM_GTW_MODE;
- pci_write_config_dword(fbd_dev, DIMM_MC, new_mc_val);
- return 0;
- } else {
- dprintk("could not set GTW_MODE = 1 (OLTT enabled)\n");
- return -ENODEV;
- }
-}
-
-/* Restore DIMM thermal throttle configuration */
-static void i7300_idle_thrt_restore(void)
-{
- pci_write_config_dword(fbd_dev, DIMM_MC, i7300_idle_mc_saved);
- pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
-}
-
-/* Enable DIMM thermal throttling */
-static void i7300_idle_start(void)
-{
- u8 new_ctl;
- u8 limit;
-
- new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
-
- limit = throttle_low_limit;
- if (unlikely(limit > MAX_THROTTLE_LOW_LIMIT))
- limit = MAX_THROTTLE_LOW_LIMIT;
-
- pci_write_config_byte(fbd_dev, DIMM_THRTLOW, limit);
-
- new_ctl = i7300_idle_thrtctl_saved | DIMM_THRTCTL_THRMHUNT;
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
-}
-
-/* Disable DIMM thermal throttling */
-static void i7300_idle_stop(void)
-{
- u8 new_ctl;
- u8 got_ctl;
-
- new_ctl = i7300_idle_thrtctl_saved & ~DIMM_THRTCTL_THRMHUNT;
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, new_ctl);
-
- pci_write_config_byte(fbd_dev, DIMM_THRTLOW, i7300_idle_thrtlow_saved);
- pci_write_config_byte(fbd_dev, DIMM_THRTCTL, i7300_idle_thrtctl_saved);
- pci_read_config_byte(fbd_dev, DIMM_THRTCTL, &got_ctl);
- WARN_ON_ONCE(got_ctl != i7300_idle_thrtctl_saved);
-}
-
-
-/*
- * i7300_avg_duration_check()
- * return 0 if the decaying average of recent idle durations is
- * more than DURATION_THRESHOLD_US
- */
-static int i7300_avg_duration_check(void)
-{
- if (avg_idle_us >= DURATION_THRESHOLD_US)
- return 0;
-
-#ifdef DEBUG
- past_skip++;
-#endif
- return 1;
-}
-
-/* Idle notifier to look at idle CPUs */
-static int i7300_idle_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
-{
- unsigned long flags;
- ktime_t now_ktime;
- static ktime_t idle_begin_time;
- static int time_init = 1;
-
- if (!throttle_low_limit)
- return 0;
-
- if (unlikely(time_init)) {
- time_init = 0;
- idle_begin_time = ktime_get();
- }
-
- raw_spin_lock_irqsave(&i7300_idle_lock, flags);
- if (val == IDLE_START) {
-
- cpumask_set_cpu(smp_processor_id(), idle_cpumask);
-
- if (cpumask_weight(idle_cpumask) != num_online_cpus())
- goto end;
-
- now_ktime = ktime_get();
- idle_begin_time = now_ktime;
-
- if (i7300_avg_duration_check())
- goto end;
-
- i7300_idle_active = 1;
- total_starts++;
- start_ktime = now_ktime;
-
- i7300_idle_start();
- i7300_idle_ioat_start();
-
- } else if (val == IDLE_END) {
- cpumask_clear_cpu(smp_processor_id(), idle_cpumask);
- if (cpumask_weight(idle_cpumask) == (num_online_cpus() - 1)) {
- /* First CPU coming out of idle */
- u64 idle_duration_us;
-
- now_ktime = ktime_get();
-
- idle_duration_us = ktime_to_us(ktime_sub
- (now_ktime, idle_begin_time));
-
- avg_idle_us =
- ((100 - DURATION_WEIGHT_PCT) * avg_idle_us +
- DURATION_WEIGHT_PCT * idle_duration_us) / 100;
-
- if (i7300_idle_active) {
- ktime_t idle_ktime;
-
- idle_ktime = ktime_sub(now_ktime, start_ktime);
- total_us += ktime_to_us(idle_ktime);
-
- i7300_idle_ioat_stop();
- i7300_idle_stop();
- i7300_idle_active = 0;
- }
- }
- }
-end:
- raw_spin_unlock_irqrestore(&i7300_idle_lock, flags);
- return 0;
-}
-
-static struct notifier_block i7300_idle_nb = {
- .notifier_call = i7300_idle_notifier,
-};
-
-MODULE_DEVICE_TABLE(pci, pci_tbl);
-
-static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count,
- loff_t *off)
-{
- unsigned long *p = fp->private_data;
- char buf[32];
- int len;
-
- len = snprintf(buf, 32, "%lu\n", *p);
- return simple_read_from_buffer(ubuf, count, off, buf, len);
-}
-
-static const struct file_operations idle_fops = {
- .open = simple_open,
- .read = stats_read_ul,
- .llseek = default_llseek,
-};
-
-struct debugfs_file_info {
- void *ptr;
- char name[32];
- struct dentry *file;
-} debugfs_file_list[] = {
- {&total_starts, "total_starts", NULL},
- {&total_us, "total_us", NULL},
-#ifdef DEBUG
- {&past_skip, "past_skip", NULL},
-#endif
- {NULL, "", NULL}
- };
-
-static int __init i7300_idle_init(void)
-{
- raw_spin_lock_init(&i7300_idle_lock);
- total_us = 0;
-
- if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
- return -ENODEV;
-
- if (i7300_idle_thrt_save())
- return -ENODEV;
-
- if (i7300_idle_ioat_init())
- return -ENODEV;
-
- if (!zalloc_cpumask_var(&idle_cpumask, GFP_KERNEL))
- return -ENOMEM;
-
- debugfs_dir = debugfs_create_dir("i7300_idle", NULL);
- if (debugfs_dir) {
- int i = 0;
-
- while (debugfs_file_list[i].ptr != NULL) {
- debugfs_file_list[i].file = debugfs_create_file(
- debugfs_file_list[i].name,
- S_IRUSR,
- debugfs_dir,
- debugfs_file_list[i].ptr,
- &idle_fops);
- i++;
- }
- }
-
- idle_notifier_register(&i7300_idle_nb);
-
- printk(KERN_INFO "i7300_idle: loaded v%s\n", I7300_IDLE_DRIVER_VERSION);
- return 0;
-}
-
-static void __exit i7300_idle_exit(void)
-{
- idle_notifier_unregister(&i7300_idle_nb);
- free_cpumask_var(idle_cpumask);
-
- if (debugfs_dir) {
- int i = 0;
-
- while (debugfs_file_list[i].file != NULL) {
- debugfs_remove(debugfs_file_list[i].file);
- i++;
- }
-
- debugfs_remove(debugfs_dir);
- }
- i7300_idle_thrt_restore();
- i7300_idle_ioat_exit();
-}
-
-module_init(i7300_idle_init);
-module_exit(i7300_idle_exit);
-
-MODULE_AUTHOR("Andy Henroid <andrew.d.henroid@intel.com>");
-MODULE_DESCRIPTION("Intel Chipset DIMM Idle Power Saving Driver v"
- I7300_IDLE_DRIVER_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index d8376c2d18b3..c66c273dfd8a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4688,25 +4688,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
}
}
-static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
- unsigned long action, void *v)
+static int intel_iommu_cpu_dead(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)v;
-
- switch (action) {
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- free_all_cpu_cached_iovas(cpu);
- flush_unmaps_timeout(cpu);
- break;
- }
- return NOTIFY_OK;
+ free_all_cpu_cached_iovas(cpu);
+ flush_unmaps_timeout(cpu);
+ return 0;
}
-static struct notifier_block intel_iommu_cpu_nb = {
- .notifier_call = intel_iommu_cpu_notifier,
-};
-
static ssize_t intel_iommu_show_version(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -4855,8 +4843,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
- register_hotcpu_notifier(&intel_iommu_cpu_nb);
-
+ cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
+ intel_iommu_cpu_dead);
intel_iommu_enabled = 1;
return 0;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bc0af3307bbf..ae96731cd2fb 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -211,6 +211,10 @@ config XTENSA_MX
bool
select IRQ_DOMAIN
+config XILINX_INTC
+ bool
+ select IRQ_DOMAIN
+
config IRQ_CROSSBAR
bool
help
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e4dbfc85abdb..0e55d94065bf 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_TS4800_IRQ) += irq-ts4800.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o
+obj-$(CONFIG_XILINX_INTC) += irq-xilinx-intc.o
obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o
obj-$(CONFIG_BCM6345_L1_IRQ) += irq-bcm6345-l1.o
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 353c54986211..c2662a1bfdd3 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -215,6 +215,31 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
return 0;
}
+static void bcm7038_l1_cpu_offline(struct irq_data *d)
+{
+ struct cpumask *mask = irq_data_get_affinity_mask(d);
+ int cpu = smp_processor_id();
+ cpumask_t new_affinity;
+
+ /* This CPU was not on the affinity mask */
+ if (!cpumask_test_cpu(cpu, mask))
+ return;
+
+ if (cpumask_weight(mask) > 1) {
+ /*
+ * Multiple CPU affinity, remove this CPU from the affinity
+ * mask
+ */
+ cpumask_copy(&new_affinity, mask);
+ cpumask_clear_cpu(cpu, &new_affinity);
+ } else {
+ /* Only CPU, put on the lowest online CPU */
+ cpumask_clear(&new_affinity);
+ cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+ }
+ irq_set_affinity_locked(d, &new_affinity, false);
+}
+
static int __init bcm7038_l1_init_one(struct device_node *dn,
unsigned int idx,
struct bcm7038_l1_chip *intc)
@@ -266,6 +291,7 @@ static struct irq_chip bcm7038_l1_irq_chip = {
.irq_mask = bcm7038_l1_mask,
.irq_unmask = bcm7038_l1_unmask,
.irq_set_affinity = bcm7038_l1_set_affinity,
+ .irq_cpu_offline = bcm7038_l1_cpu_offline,
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c5dee300e8a3..69b040f47d56 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -37,7 +37,6 @@
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
-#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>
@@ -196,7 +195,7 @@ typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
- cmd->raw_cmd[0] &= ~0xffUL;
+ cmd->raw_cmd[0] &= ~0xffULL;
cmd->raw_cmd[0] |= cmd_nr;
}
@@ -208,43 +207,43 @@ static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
- cmd->raw_cmd[1] &= ~0xffffffffUL;
+ cmd->raw_cmd[1] &= ~0xffffffffULL;
cmd->raw_cmd[1] |= id;
}
static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
- cmd->raw_cmd[1] &= 0xffffffffUL;
+ cmd->raw_cmd[1] &= 0xffffffffULL;
cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}
static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
- cmd->raw_cmd[1] &= ~0x1fUL;
+ cmd->raw_cmd[1] &= ~0x1fULL;
cmd->raw_cmd[1] |= size & 0x1f;
}
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
- cmd->raw_cmd[2] &= ~0xffffffffffffUL;
- cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
+ cmd->raw_cmd[2] &= ~0xffffffffffffULL;
+ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00ULL;
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
- cmd->raw_cmd[2] &= ~(1UL << 63);
+ cmd->raw_cmd[2] &= ~(1ULL << 63);
cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
}
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
- cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
- cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
+ cmd->raw_cmd[2] &= ~(0xffffffffULL << 16);
+ cmd->raw_cmd[2] |= (target_addr & (0xffffffffULL << 16));
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
- cmd->raw_cmd[2] &= ~0xffffUL;
+ cmd->raw_cmd[2] &= ~0xffffULL;
cmd->raw_cmd[2] |= col;
}
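The UL to ULL conversions above are not cosmetic: raw_cmd[] is an array of u64, and once this driver is built for 32-bit ARM (where unsigned long is 32 bits) the UL masks truncate. A small stand-alone illustration, not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t raw = 0x1122334455667788ULL;

	/* With a 32-bit long, ~0xffffffffUL evaluates to 0, which is then
	 * promoted to a 64-bit 0, so the AND would wipe the whole field
	 * instead of only the low word. The ULL form keeps the upper
	 * 32 bits set as intended. */
	printf("%llx\n", (unsigned long long)(raw & ~0xffffffffULL));
	/* prints 1122334400000000 */

	/* Likewise, (1UL << 63) is undefined with a 32-bit long, while
	 * (1ULL << 63) is always well defined. */
	return 0;
}
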
@@ -433,7 +432,7 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
* the ITS.
*/
if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
- __flush_dcache_area(cmd, sizeof(*cmd));
+ gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
else
dsb(ishst);
}
@@ -602,7 +601,7 @@ static void lpi_set_config(struct irq_data *d, bool enable)
* Humpf...
*/
if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
- __flush_dcache_area(cfg, sizeof(*cfg));
+ gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
else
dsb(ishst);
its_send_inv(its_dev, id);
@@ -657,8 +656,8 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
its = its_dev->its;
addr = its->phys_base + GITS_TRANSLATER;
- msg->address_lo = addr & ((1UL << 32) - 1);
- msg->address_hi = addr >> 32;
+ msg->address_lo = lower_32_bits(addr);
+ msg->address_hi = upper_32_bits(addr);
msg->data = its_get_event_id(d);
iommu_dma_map_msi_msg(d->irq, msg);
@@ -817,7 +816,7 @@ static int __init its_alloc_lpi_tables(void)
LPI_PROPBASE_SZ);
/* Make sure the GIC will observe the written configuration */
- __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
+ gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);
return 0;
}
@@ -836,7 +835,7 @@ static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
u32 idx = baser - its->tables;
- return readq_relaxed(its->base + GITS_BASER + (idx << 3));
+ return gits_read_baser(its->base + GITS_BASER + (idx << 3));
}
static void its_write_baser(struct its_node *its, struct its_baser *baser,
@@ -844,7 +843,7 @@ static void its_write_baser(struct its_node *its, struct its_baser *baser,
{
u32 idx = baser - its->tables;
- writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
+ gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
baser->val = its_read_baser(its, baser);
}
@@ -910,7 +909,7 @@ retry_baser:
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
- __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
@@ -935,9 +934,9 @@ retry_baser:
}
if (val != tmp) {
- pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
+ pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
&its->phys_base, its_base_type_string[type],
- (unsigned long) val, (unsigned long) tmp);
+ val, tmp);
free_pages((unsigned long)base, order);
return -ENXIO;
}
@@ -948,7 +947,7 @@ retry_baser:
tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
- &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
indirect ? "indirect" : "flat", (int)esz,
@@ -983,7 +982,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
* which is reported by ITS hardware times lvl1 table
* entry size.
*/
- ids -= ilog2(psz / esz);
+ ids -= ilog2(psz / (int)esz);
esz = GITS_LVL1_ENTRY_SIZE;
}
}
@@ -998,7 +997,7 @@ static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser
new_order = max_t(u32, get_order(esz << ids), new_order);
if (new_order >= MAX_ORDER) {
new_order = MAX_ORDER - 1;
- ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
+ ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
&its->phys_base, its->device_ids, ids);
}
@@ -1102,7 +1101,7 @@ static void its_cpu_init_lpis(void)
}
/* Make sure the GIC will observe the zero-ed page */
- __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);
+ gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
paddr = page_to_phys(pend_page);
pr_info("CPU%d: using LPI pending table @%pa\n",
@@ -1126,8 +1125,8 @@ static void its_cpu_init_lpis(void)
GICR_PROPBASER_WaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
- writeq_relaxed(val, rbase + GICR_PROPBASER);
- tmp = readq_relaxed(rbase + GICR_PROPBASER);
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
+ tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
@@ -1139,7 +1138,7 @@ static void its_cpu_init_lpis(void)
val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
GICR_PROPBASER_CACHEABILITY_MASK);
val |= GICR_PROPBASER_nC;
- writeq_relaxed(val, rbase + GICR_PROPBASER);
+ gicr_write_propbaser(val, rbase + GICR_PROPBASER);
}
pr_info_once("GIC: using cache flushing for LPI property table\n");
gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
@@ -1150,8 +1149,8 @@ static void its_cpu_init_lpis(void)
GICR_PENDBASER_InnerShareable |
GICR_PENDBASER_WaWb);
- writeq_relaxed(val, rbase + GICR_PENDBASER);
- tmp = readq_relaxed(rbase + GICR_PENDBASER);
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
+ tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
/*
@@ -1161,7 +1160,7 @@ static void its_cpu_init_lpis(void)
val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
GICR_PENDBASER_CACHEABILITY_MASK);
val |= GICR_PENDBASER_nC;
- writeq_relaxed(val, rbase + GICR_PENDBASER);
+ gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
}
/* Enable LPIs */
@@ -1287,13 +1286,13 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
/* Flush Lvl2 table to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
- __flush_dcache_area(page_address(page), baser->psz);
+ gic_flush_dcache_to_poc(page_address(page), baser->psz);
table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
- __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);
+ gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
/* Ensure updated table contents are visible to ITS hardware */
dsb(sy);
@@ -1340,7 +1339,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
return NULL;
}
- __flush_dcache_area(itt, sz);
+ gic_flush_dcache_to_poc(itt, sz);
dev->its = its;
dev->itt = itt;
@@ -1717,8 +1716,8 @@ static int __init its_probe_one(struct resource *res,
(ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
GITS_CBASER_VALID);
- writeq_relaxed(baser, its->base + GITS_CBASER);
- tmp = readq_relaxed(its->base + GITS_CBASER);
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
+ tmp = gits_read_cbaser(its->base + GITS_CBASER);
if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
@@ -1730,13 +1729,13 @@ static int __init its_probe_one(struct resource *res,
baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
GITS_CBASER_CACHEABILITY_MASK);
baser |= GITS_CBASER_nC;
- writeq_relaxed(baser, its->base + GITS_CBASER);
+ gits_write_cbaser(baser, its->base + GITS_CBASER);
}
pr_info("ITS: using cache flushing for cmd queue\n");
its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
}
- writeq_relaxed(0, its->base + GITS_CWRITER);
+ gits_write_cwriter(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
err = its_init_domain(handle, its);
diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c
new file mode 100644
index 000000000000..3db7ab1c9741
--- /dev/null
+++ b/drivers/irqchip/irq-xilinx-intc.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012-2013 Xilinx, Inc.
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/jump_label.h>
+#include <linux/bug.h>
+#include <linux/of_irq.h>
+
+/* No one else should require these constants, so define them locally here. */
+#define ISR 0x00 /* Interrupt Status Register */
+#define IPR 0x04 /* Interrupt Pending Register */
+#define IER 0x08 /* Interrupt Enable Register */
+#define IAR 0x0c /* Interrupt Acknowledge Register */
+#define SIE 0x10 /* Set Interrupt Enable bits */
+#define CIE 0x14 /* Clear Interrupt Enable bits */
+#define IVR 0x18 /* Interrupt Vector Register */
+#define MER 0x1c /* Master Enable Register */
+
+#define MER_ME (1<<0)
+#define MER_HIE (1<<1)
+
+static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
+
+struct xintc_irq_chip {
+ void __iomem *base;
+ struct irq_domain *root_domain;
+ u32 intr_mask;
+};
+
+static struct xintc_irq_chip *xintc_irqc;
+
+static void xintc_write(int reg, u32 data)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ iowrite32be(data, xintc_irqc->base + reg);
+ else
+ iowrite32(data, xintc_irqc->base + reg);
+}
+
+static unsigned int xintc_read(int reg)
+{
+ if (static_branch_unlikely(&xintc_is_be))
+ return ioread32be(xintc_irqc->base + reg);
+ else
+ return ioread32(xintc_irqc->base + reg);
+}
+
+static void intc_enable_or_unmask(struct irq_data *d)
+{
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("irq-xilinx: enable_or_unmask: %ld\n", d->hwirq);
+
+ /* Ack level irqs here because they cannot be acked from the ack
+ * callback: handle_level_irq() acks the irq before calling the
+ * interrupt handler.
+ */
+ if (irqd_is_level_type(d))
+ xintc_write(IAR, mask);
+
+ xintc_write(SIE, mask);
+}
+
+static void intc_disable_or_mask(struct irq_data *d)
+{
+ pr_debug("irq-xilinx: disable: %ld\n", d->hwirq);
+ xintc_write(CIE, 1 << d->hwirq);
+}
+
+static void intc_ack(struct irq_data *d)
+{
+ pr_debug("irq-xilinx: ack: %ld\n", d->hwirq);
+ xintc_write(IAR, 1 << d->hwirq);
+}
+
+static void intc_mask_ack(struct irq_data *d)
+{
+ unsigned long mask = 1 << d->hwirq;
+
+ pr_debug("irq-xilinx: disable_and_ack: %ld\n", d->hwirq);
+ xintc_write(CIE, mask);
+ xintc_write(IAR, mask);
+}
+
+static struct irq_chip intc_dev = {
+ .name = "Xilinx INTC",
+ .irq_unmask = intc_enable_or_unmask,
+ .irq_mask = intc_disable_or_mask,
+ .irq_ack = intc_ack,
+ .irq_mask_ack = intc_mask_ack,
+};
+
+unsigned int xintc_get_irq(void)
+{
+ unsigned int hwirq, irq = -1;
+
+ hwirq = xintc_read(IVR);
+ if (hwirq != -1U)
+ irq = irq_find_mapping(xintc_irqc->root_domain, hwirq);
+
+ pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
+
+ return irq;
+}
+
+static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ if (xintc_irqc->intr_mask & (1 << hw)) {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_edge_irq, "edge");
+ irq_clear_status_flags(irq, IRQ_LEVEL);
+ } else {
+ irq_set_chip_and_handler_name(irq, &intc_dev,
+ handle_level_irq, "level");
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ }
+ return 0;
+}
+
+static const struct irq_domain_ops xintc_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = xintc_map,
+};
+
+static void xil_intc_irq_handler(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 pending;
+
+ chained_irq_enter(chip, desc);
+ do {
+ pending = xintc_get_irq();
+ if (pending == -1U)
+ break;
+ generic_handle_irq(pending);
+ } while (true);
+ chained_irq_exit(chip, desc);
+}
+
+static int __init xilinx_intc_of_init(struct device_node *intc,
+ struct device_node *parent)
+{
+ u32 nr_irq;
+ int ret, irq;
+ struct xintc_irq_chip *irqc;
+
+ if (xintc_irqc) {
+ pr_err("irq-xilinx: Multiple instances aren't supported\n");
+ return -EINVAL;
+ }
+
+ irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
+ if (!irqc)
+ return -ENOMEM;
+
+ xintc_irqc = irqc;
+
+ irqc->base = of_iomap(intc, 0);
+ BUG_ON(!irqc->base);
+
+ ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
+ if (ret < 0) {
+ pr_err("irq-xilinx: unable to read xlnx,num-intr-inputs\n");
+ goto err_alloc;
+ }
+
+ ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &irqc->intr_mask);
+ if (ret < 0) {
+ pr_warn("irq-xilinx: unable to read xlnx,kind-of-intr\n");
+ irqc->intr_mask = 0;
+ }
+
+ if (irqc->intr_mask >> nr_irq)
+ pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");
+
+ pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n",
+ intc->full_name, nr_irq, irqc->intr_mask);
+
+
+ /*
+ * Disable all external interrupts until they are
+ * explicitly requested.
+ */
+ xintc_write(IER, 0);
+
+ /* Acknowledge any pending interrupts just in case. */
+ xintc_write(IAR, 0xffffffff);
+
+ /* Turn on the Master Enable. */
+ xintc_write(MER, MER_HIE | MER_ME);
+ if (!(xintc_read(MER) & (MER_HIE | MER_ME))) {
+ static_branch_enable(&xintc_is_be);
+ xintc_write(MER, MER_HIE | MER_ME);
+ }
+
+ irqc->root_domain = irq_domain_add_linear(intc, nr_irq,
+ &xintc_irq_domain_ops, irqc);
+ if (!irqc->root_domain) {
+ pr_err("irq-xilinx: Unable to create IRQ domain\n");
+ goto err_alloc;
+ }
+
+ if (parent) {
+ irq = irq_of_parse_and_map(intc, 0);
+ if (irq) {
+ irq_set_chained_handler_and_data(irq,
+ xil_intc_irq_handler,
+ irqc);
+ } else {
+ pr_err("irq-xilinx: interrupts property not in DT\n");
+ ret = -EINVAL;
+ goto err_alloc;
+ }
+ } else {
+ irq_set_default_host(irqc->root_domain);
+ }
+
+ return 0;
+
+err_alloc:
+ xintc_irqc = NULL;
+ kfree(irqc);
+ return ret;
+
+}
+
+IRQCHIP_DECLARE(xilinx_intc_xps, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
+IRQCHIP_DECLARE(xilinx_intc_opb, "xlnx,opb-intc-1.00.c", xilinx_intc_of_init);
diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c
index 19a32280731d..601f81c04873 100644
--- a/drivers/lguest/hypercalls.c
+++ b/drivers/lguest/hypercalls.c
@@ -109,10 +109,6 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
case LHCALL_SET_CLOCKEVENT:
guest_set_clockevent(cpu, args->arg1);
break;
- case LHCALL_TS:
- /* This sets the TS flag, as we saw used in run_guest(). */
- cpu->ts = args->arg1;
- break;
case LHCALL_HALT:
/* Similarly, this sets the halted flag for run_guest(). */
cpu->halted = 1;
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 69b3814afd2f..2356a2318034 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -43,7 +43,6 @@ struct lg_cpu {
struct mm_struct *mm; /* == tsk->mm, but that becomes NULL on exit */
u32 cr2;
- int ts;
u32 esp1;
u16 ss1;
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 6e9042e3d2a9..743253fc638f 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -247,14 +247,6 @@ unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any)
void lguest_arch_run_guest(struct lg_cpu *cpu)
{
/*
- * Remember the awfully-named TS bit? If the Guest has asked to set it
- * we set it now, so we can trap and pass that trap to the Guest if it
- * uses the FPU.
- */
- if (cpu->ts && fpregs_active())
- stts();
-
- /*
* SYSENTER is an optimized way of doing system calls. We can't allow
* it because it always jumps to privilege level 0. A normal Guest
* won't try it because we don't advertise it in CPUID, but a malicious
@@ -282,10 +274,6 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
if (boot_cpu_has(X86_FEATURE_SEP))
wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
- /* Clear the host TS bit if it was set above. */
- if (cpu->ts && fpregs_active())
- clts();
-
/*
* If the Guest page faulted, then the cr2 register will tell us the
* bad virtual address. We have to grab this now, because once we
@@ -421,12 +409,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
kill_guest(cpu, "Writing cr2");
break;
case 7: /* We've intercepted a Device Not Available fault. */
- /*
- * If the Guest doesn't want to know, we already restored the
- * Floating Point Unit, so we just continue without telling it.
- */
- if (!cpu->ts)
- return;
+ /* No special handling is needed here. */
break;
case 32 ... 255:
/* This might be a syscall. */
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
index fdf954c2107f..cfa1039c62e7 100644
--- a/drivers/misc/lkdtm.h
+++ b/drivers/misc/lkdtm.h
@@ -21,6 +21,8 @@ void lkdtm_SPINLOCKUP(void);
void lkdtm_HUNG_TASK(void);
void lkdtm_ATOMIC_UNDERFLOW(void);
void lkdtm_ATOMIC_OVERFLOW(void);
+void lkdtm_CORRUPT_LIST_ADD(void);
+void lkdtm_CORRUPT_LIST_DEL(void);
/* lkdtm_heap.c */
void lkdtm_OVERWRITE_ALLOCATION(void);
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
index 182ae1894b32..f336206d4b1f 100644
--- a/drivers/misc/lkdtm_bugs.c
+++ b/drivers/misc/lkdtm_bugs.c
@@ -5,8 +5,13 @@
* test source files.
*/
#include "lkdtm.h"
+#include <linux/list.h>
#include <linux/sched.h>
+struct lkdtm_list {
+ struct list_head node;
+};
+
/*
* Make sure our attempts to overrun the kernel stack don't trigger
* a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
@@ -146,3 +151,66 @@ void lkdtm_ATOMIC_OVERFLOW(void)
pr_info("attempting bad atomic overflow\n");
atomic_inc(&over);
}
+
+void lkdtm_CORRUPT_LIST_ADD(void)
+{
+ /*
+ * Initially, an empty list via LIST_HEAD:
+ * test_head.next = &test_head
+ * test_head.prev = &test_head
+ */
+ LIST_HEAD(test_head);
+ struct lkdtm_list good, bad;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ pr_info("attempting good list addition\n");
+
+ /*
+ * Adding to the list performs these actions:
+ * test_head.next->prev = &good.node
+ * good.node.next = test_head.next
+ * good.node.prev = &test_head
+ * test_head.next = &good.node
+ */
+ list_add(&good.node, &test_head);
+
+ pr_info("attempting corrupted list addition\n");
+ /*
+ * In simulating this "write what where" primitive, the "what" is
+ * the address of &bad.node, and the "where" is the address held
+ * by "redirection".
+ */
+ test_head.next = redirection;
+ list_add(&bad.node, &test_head);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_add() corruption not detected!\n");
+}
+
+void lkdtm_CORRUPT_LIST_DEL(void)
+{
+ LIST_HEAD(test_head);
+ struct lkdtm_list item;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ list_add(&item.node, &test_head);
+
+ pr_info("attempting good list removal\n");
+ list_del(&item.node);
+
+ pr_info("attempting corrupted list removal\n");
+ list_add(&item.node, &test_head);
+
+ /* As with the list_add() test above, this corrupts "next". */
+ item.node.next = redirection;
+ list_del(&item.node);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_del() corruption not detected!\n");
+}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
index f9154b8d67f6..7eeb71a75549 100644
--- a/drivers/misc/lkdtm_core.c
+++ b/drivers/misc/lkdtm_core.c
@@ -197,6 +197,8 @@ struct crashtype crashtypes[] = {
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
CRASHTYPE(OVERFLOW),
+ CRASHTYPE(CORRUPT_LIST_ADD),
+ CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index 6b94ba610399..98cc8f535021 100644
--- a/drivers/net/ethernet/adi/Kconfig
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -58,7 +58,7 @@ config BFIN_RX_DESC_NUM
config BFIN_MAC_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
depends on BFIN_MAC && BF518
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
default y
---help---
To support the IEEE 1588 Precision Time Protocol (PTP), select y here
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 7ab6efbe4189..d5c15e8bb3de 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -177,9 +177,9 @@ config AMD_XGBE
depends on X86 || ARM64 || COMPILE_TEST
select BITREVERSE
select CRC32
- select PTP_1588_CLOCK
select PHYLIB
select AMD_XGBE_HAVE_ECC if X86
+ imply PTP_1588_CLOCK
---help---
This driver supports the AMD 10GbE Ethernet device found on an
AMD SoC.
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index b87a89988ffd..17ac8f9a51a0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -422,7 +422,8 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
goto err_wq;
}
- xgbe_ptp_register(pdata);
+ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
+ xgbe_ptp_register(pdata);
xgbe_debugfs_init(pdata);
@@ -448,7 +449,8 @@ void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
xgbe_debugfs_exit(pdata);
- xgbe_ptp_unregister(pdata);
+ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
+ xgbe_ptp_unregister(pdata);
pdata->phy_if.phy_exit(pdata);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 404c020cb82e..940fb24bba21 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -110,7 +110,7 @@ config TIGON3
depends on PCI
select PHYLIB
select HWMON
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
@@ -120,7 +120,7 @@ config TIGON3
config BNX2X
tristate "Broadcom NetXtremeII 10Gb support"
depends on PCI
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
select FW_LOADER
select ZLIB_INFLATE
select LIBCRC32C
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c0679c21638a..bbc8bd16cb97 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -53,7 +53,7 @@ config THUNDER_NIC_RGX
config LIQUIDIO
tristate "Cavium LiquidIO support"
depends on 64BIT
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
---help---
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 0d415516b577..6e490fd2345d 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -25,7 +25,7 @@ config FEC
ARCH_MXC || SOC_IMX28)
default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index c0e17433f623..1349b45f014d 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -58,7 +58,7 @@ config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
depends on PCI && (!SPARC32 || BROKEN)
select CRC32
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -83,7 +83,7 @@ config E1000E_HWTS
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
select I2C
select I2C_ALGOBIT
---help---
@@ -156,7 +156,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
select MDIO
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
@@ -213,7 +213,7 @@ config IXGBEVF
config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
depends on PCI
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -264,7 +264,7 @@ config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
depends on PCI_MSI
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 5098e7f21987..22b1cc012bc9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -7,7 +7,7 @@ config MLX4_EN
depends on MAY_USE_DEVLINK
depends on PCI
select MLX4_CORE
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports Mellanox Technologies ConnectX Ethernet
devices.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index aae46884bf93..2cd841590e70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,7 +14,7 @@ config MLX5_CORE
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 85ec447c2d18..27be51f0a421 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -37,7 +37,7 @@ config RAVB
select MII
select MDIO_BITBANG
select PHYLIB
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
help
Renesas Ethernet AVB device driver.
This driver supports the following SoCs:
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
index 2360d8150777..fbd5e06654c6 100644
--- a/drivers/net/ethernet/samsung/Kconfig
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -21,7 +21,7 @@ config SXGBE_ETH
depends on HAS_IOMEM && HAS_DMA
select PHYLIB
select CRC32
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This is the driver for the SXGBE 10G Ethernet IP block found on
Samsung platforms.
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 605ebc73b2b2..46f7be85f5a3 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -5,7 +5,7 @@ config SFC
select CRC32
select I2C
select I2C_ALGOBIT
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports 10/40-gigabit Ethernet cards based on
the Solarflare SFC9000-family and SFC9100-family controllers.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index d37e32d55ca9..ab66248a4b78 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -4,7 +4,7 @@ config STMMAC_ETH
select MII
select PHYLIB
select CRC32
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
select RESET_CONTROLLER
---help---
This is the driver for the Ethernet IPs are built around a
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index ff7f518a0702..c307b206a362 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -76,7 +76,7 @@ config TI_CPSW
config TI_CPTS
tristate "TI Common Platform Time Sync (CPTS) Support"
depends on TI_CPSW
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
---help---
This driver supports the Common Platform Time Sync unit of
the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index f59a6c265331..bdfeaf3d4fce 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -9,7 +9,7 @@ config TILE_NET
select CRC32
select TILE_GXIO_MPIPE if TILEGX
select HIGH_RES_TIMERS if TILEGX
- select PTP_1588_CLOCK if TILEGX
+ imply PTP_1588_CLOCK if TILEGX
---help---
This is a standard Linux network device driver for the
on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
index 9559829fb234..e65a576e4032 100644
--- a/drivers/oprofile/nmi_timer_int.c
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -59,25 +59,16 @@ static void nmi_timer_stop_cpu(int cpu)
perf_event_disable(event);
}
-static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
- void *data)
+static int nmi_timer_cpu_online(unsigned int cpu)
{
- int cpu = (unsigned long)data;
- switch (action) {
- case CPU_DOWN_FAILED:
- case CPU_ONLINE:
- nmi_timer_start_cpu(cpu);
- break;
- case CPU_DOWN_PREPARE:
- nmi_timer_stop_cpu(cpu);
- break;
- }
- return NOTIFY_DONE;
+ nmi_timer_start_cpu(cpu);
+ return 0;
+}
+static int nmi_timer_cpu_predown(unsigned int cpu)
+{
+ nmi_timer_stop_cpu(cpu);
+ return 0;
}
-
-static struct notifier_block nmi_timer_cpu_nb = {
- .notifier_call = nmi_timer_cpu_notifier
-};
static int nmi_timer_start(void)
{
@@ -103,13 +94,14 @@ static void nmi_timer_stop(void)
put_online_cpus();
}
+static enum cpuhp_state hp_online;
+
static void nmi_timer_shutdown(void)
{
struct perf_event *event;
int cpu;
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&nmi_timer_cpu_nb);
+ cpuhp_remove_state(hp_online);
for_each_possible_cpu(cpu) {
event = per_cpu(nmi_timer_events, cpu);
if (!event)
@@ -118,13 +110,11 @@ static void nmi_timer_shutdown(void)
per_cpu(nmi_timer_events, cpu) = NULL;
perf_event_release_kernel(event);
}
-
- cpu_notifier_register_done();
}
static int nmi_timer_setup(void)
{
- int cpu, err;
+ int err;
u64 period;
/* clock cycles per tick: */
@@ -132,24 +122,14 @@ static int nmi_timer_setup(void)
do_div(period, HZ);
nmi_timer_attr.sample_period = period;
- cpu_notifier_register_begin();
- err = __register_cpu_notifier(&nmi_timer_cpu_nb);
- if (err)
- goto out;
-
- /* can't attach events to offline cpus: */
- for_each_online_cpu(cpu) {
- err = nmi_timer_start_cpu(cpu);
- if (err) {
- cpu_notifier_register_done();
- nmi_timer_shutdown();
- return err;
- }
+ err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "oprofile/nmi:online",
+ nmi_timer_cpu_online, nmi_timer_cpu_predown);
+ if (err < 0) {
+ nmi_timer_shutdown();
+ return err;
}
-
-out:
- cpu_notifier_register_done();
- return err;
+ hp_online = err;
+ return 0;
}
int __init op_nmi_timer_init(struct oprofile_operations *ops)
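The oprofile conversion above, like the qbman, qman and X-Gene MSI changes later in this merge, follows the same hotplug-state pattern: register an online/predown pair on a dynamic state, remember the returned slot, and remove it on teardown. A condensed sketch with hypothetical names:

#include <linux/cpuhotplug.h>

static enum cpuhp_state example_hp_state;

static int example_cpu_online(unsigned int cpu)
{
	/* set up per-CPU resources for @cpu */
	return 0;
}

static int example_cpu_predown(unsigned int cpu)
{
	/* tear down per-CPU resources before @cpu goes offline */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/example:online",
				example_cpu_online, example_cpu_predown);
	if (ret < 0)
		return ret;
	/* Dynamic states return the allocated slot; keep it for removal. */
	example_hp_state = ret;
	return 0;
}

static void __exit example_exit(void)
{
	cpuhp_remove_state(example_hp_state);
}
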
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index a6456b578269..1f38d0836751 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -360,16 +360,16 @@ static void xgene_msi_isr(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static enum cpuhp_state pci_xgene_online;
+
static int xgene_msi_remove(struct platform_device *pdev)
{
- int virq, i;
struct xgene_msi *msi = platform_get_drvdata(pdev);
- for (i = 0; i < NR_HW_IRQS; i++) {
- virq = msi->msi_groups[i].gic_irq;
- if (virq != 0)
- irq_set_chained_handler_and_data(virq, NULL, NULL);
- }
+ if (pci_xgene_online)
+ cpuhp_remove_state(pci_xgene_online);
+ cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);
+
kfree(msi->msi_groups);
kfree(msi->bitmap);
@@ -427,7 +427,7 @@ static int xgene_msi_hwirq_alloc(unsigned int cpu)
return 0;
}
-static void xgene_msi_hwirq_free(unsigned int cpu)
+static int xgene_msi_hwirq_free(unsigned int cpu)
{
struct xgene_msi *msi = &xgene_msi_ctrl;
struct xgene_msi_group *msi_group;
@@ -441,33 +441,9 @@ static void xgene_msi_hwirq_free(unsigned int cpu)
irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
NULL);
}
+ return 0;
}
-static int xgene_msi_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- xgene_msi_hwirq_alloc(cpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- xgene_msi_hwirq_free(cpu);
- break;
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block xgene_msi_cpu_notifier = {
- .notifier_call = xgene_msi_cpu_callback,
-};
-
static const struct of_device_id xgene_msi_match_table[] = {
{.compatible = "apm,xgene1-msi"},
{},
@@ -478,7 +454,6 @@ static int xgene_msi_probe(struct platform_device *pdev)
struct resource *res;
int rc, irq_index;
struct xgene_msi *xgene_msi;
- unsigned int cpu;
int virt_msir;
u32 msi_val, msi_idx;
@@ -540,28 +515,22 @@ static int xgene_msi_probe(struct platform_device *pdev)
}
}
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu)
- if (xgene_msi_hwirq_alloc(cpu)) {
- dev_err(&pdev->dev, "failed to register MSI handlers\n");
- cpu_notifier_register_done();
- goto error;
- }
-
- rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier);
- if (rc) {
- dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
- cpu_notifier_register_done();
- goto error;
- }
-
- cpu_notifier_register_done();
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
+ xgene_msi_hwirq_alloc, NULL);
+ if (rc)
+ goto err_cpuhp;
+ pci_xgene_online = rc;
+ rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
+ xgene_msi_hwirq_free);
+ if (rc)
+ goto err_cpuhp;
dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
return 0;
+err_cpuhp:
+ dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
xgene_msi_remove(pdev);
return rc;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index da95f4e689f2..a7439a154dd6 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -551,14 +551,14 @@ error_attrs:
}
static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
+msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
{
struct cpumask *masks = NULL;
struct msi_desc *entry;
u16 control;
- if (affinity) {
- masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (affd) {
+ masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
@@ -618,7 +618,8 @@ static int msi_verify_entries(struct pci_dev *dev)
* an error, and a positive return value indicates the number of interrupts
* which could have been allocated.
*/
-static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+ const struct irq_affinity *affd)
{
struct msi_desc *entry;
int ret;
@@ -626,7 +627,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
- entry = msi_setup_entry(dev, nvec, affinity);
+ entry = msi_setup_entry(dev, nvec, affd);
if (!entry)
return -ENOMEM;
@@ -690,14 +691,14 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec,
- bool affinity)
+ const struct irq_affinity *affd)
{
struct cpumask *curmsk, *masks = NULL;
struct msi_desc *entry;
int ret, i;
- if (affinity) {
- masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
+ if (affd) {
+ masks = irq_create_affinity_masks(nvec, affd);
if (!masks)
pr_err("Unable to allocate affinity masks, ignoring\n");
}
@@ -753,14 +754,14 @@ static void msix_program_entries(struct pci_dev *dev,
* @dev: pointer to the pci_dev data structure of MSI-X device function
* @entries: pointer to an array of struct msix_entry entries
* @nvec: number of @entries
- * @affinity: flag to indicate CPU IRQ affinity mask should be set
+ * @affd: optional pointer to enable automatic affinity assignment
*
* Setup the MSI-X capability structure of device function with a
* single MSI-X IRQ. A return of zero indicates the successful setup of
* requested MSI-X entries with allocated IRQs or non-zero for otherwise.
**/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, bool affinity)
+ int nvec, const struct irq_affinity *affd)
{
int ret;
u16 control;
@@ -775,7 +776,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
if (!base)
return -ENOMEM;
- ret = msix_setup_entries(dev, base, entries, nvec, affinity);
+ ret = msix_setup_entries(dev, base, entries, nvec, affd);
if (ret)
return ret;
@@ -956,7 +957,7 @@ int pci_msix_vec_count(struct pci_dev *dev)
EXPORT_SYMBOL(pci_msix_vec_count);
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
- int nvec, bool affinity)
+ int nvec, const struct irq_affinity *affd)
{
int nr_entries;
int i, j;
@@ -988,7 +989,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
return -EINVAL;
}
- return msix_capability_init(dev, entries, nvec, affinity);
+ return msix_capability_init(dev, entries, nvec, affd);
}
/**
@@ -1008,7 +1009,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
**/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
- return __pci_enable_msix(dev, entries, nvec, false);
+ return __pci_enable_msix(dev, entries, nvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix);
@@ -1059,9 +1060,8 @@ int pci_msi_enabled(void)
EXPORT_SYMBOL(pci_msi_enabled);
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
- unsigned int flags)
+ const struct irq_affinity *affd)
{
- bool affinity = flags & PCI_IRQ_AFFINITY;
int nvec;
int rc;
@@ -1090,14 +1090,13 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
nvec = maxvec;
for (;;) {
- if (affinity) {
- nvec = irq_calc_affinity_vectors(dev->irq_affinity,
- nvec);
+ if (affd) {
+ nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
- rc = msi_capability_init(dev, nvec, affinity);
+ rc = msi_capability_init(dev, nvec, affd);
if (rc == 0)
return nvec;
@@ -1124,29 +1123,27 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
**/
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
{
- return __pci_enable_msi_range(dev, minvec, maxvec, 0);
+ return __pci_enable_msi_range(dev, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msi_range);
static int __pci_enable_msix_range(struct pci_dev *dev,
- struct msix_entry *entries, int minvec, int maxvec,
- unsigned int flags)
+ struct msix_entry *entries, int minvec,
+ int maxvec, const struct irq_affinity *affd)
{
- bool affinity = flags & PCI_IRQ_AFFINITY;
int rc, nvec = maxvec;
if (maxvec < minvec)
return -ERANGE;
for (;;) {
- if (affinity) {
- nvec = irq_calc_affinity_vectors(dev->irq_affinity,
- nvec);
+ if (affd) {
+ nvec = irq_calc_affinity_vectors(nvec, affd);
if (nvec < minvec)
return -ENOSPC;
}
- rc = __pci_enable_msix(dev, entries, nvec, affinity);
+ rc = __pci_enable_msix(dev, entries, nvec, affd);
if (rc == 0)
return nvec;
@@ -1177,16 +1174,17 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec)
{
- return __pci_enable_msix_range(dev, entries, minvec, maxvec, 0);
+ return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix_range);
/**
- * pci_alloc_irq_vectors - allocate multiple IRQs for a device
+ * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
* @dev: PCI device to operate on
* @min_vecs: minimum number of vectors required (must be >= 1)
* @max_vecs: maximum (desired) number of vectors
* @flags: flags or quirks for the allocation
+ * @affd: optional description of the affinity requirements
*
* Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
* vectors if available, and fall back to a single legacy vector
@@ -1198,20 +1196,30 @@ EXPORT_SYMBOL(pci_enable_msix_range);
* To get the Linux IRQ number used for a vector that can be passed to
* request_irq() use the pci_irq_vector() helper.
*/
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
- unsigned int max_vecs, unsigned int flags)
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *affd)
{
+ static const struct irq_affinity msi_default_affd;
int vecs = -ENOSPC;
+ if (flags & PCI_IRQ_AFFINITY) {
+ if (!affd)
+ affd = &msi_default_affd;
+ } else {
+ if (WARN_ON(affd))
+ affd = NULL;
+ }
+
if (flags & PCI_IRQ_MSIX) {
vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
- flags);
+ affd);
if (vecs > 0)
return vecs;
}
if (flags & PCI_IRQ_MSI) {
- vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, flags);
+ vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
if (vecs > 0)
return vecs;
}
@@ -1224,7 +1232,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
return vecs;
}
-EXPORT_SYMBOL(pci_alloc_irq_vectors);
+EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
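A hedged usage sketch for the new entry point, with the device, vector count and handler invented for illustration; passing a NULL affd together with PCI_IRQ_AFFINITY falls back to the default irq_affinity description, as the code above shows:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_irqs(struct pci_dev *pdev)
{
	int nvecs, irq;

	/* Ask for up to 8 MSI-X or MSI vectors, spread across CPUs. */
	nvecs = pci_alloc_irq_vectors_affinity(pdev, 1, 8,
			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_AFFINITY, NULL);
	if (nvecs < 0)
		return nvecs;

	/* Translate vector 0 into its Linux IRQ number for request_irq(). */
	irq = pci_irq_vector(pdev, 0);
	return request_irq(irq, example_handler, 0, "example", pdev);
}
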
/**
* pci_free_irq_vectors - free previously allocated IRQs for a device
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index ee3de3421f2d..bdce33291161 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,7 +6,7 @@ menu "PTP clock support"
config PTP_1588_CLOCK
tristate "PTP clock support"
- depends on NET
+ depends on NET && POSIX_TIMERS
select PPS
select NET_PTP_CLASSIFY
help
@@ -28,7 +28,7 @@ config PTP_1588_CLOCK
config PTP_1588_CLOCK_GIANFAR
tristate "Freescale eTSEC as PTP clock"
depends on GIANFAR
- select PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
default y
help
This driver adds support for using the eTSEC as a PTP
@@ -42,7 +42,7 @@ config PTP_1588_CLOCK_GIANFAR
config PTP_1588_CLOCK_IXP46X
tristate "Intel IXP46x as PTP clock"
depends on IXP4XX_ETH
- select PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
default y
help
This driver adds support for using the IXP46X as a PTP
@@ -60,7 +60,7 @@ config DP83640_PHY
tristate "Driver for the National Semiconductor DP83640 PHYTER"
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
- select PTP_1588_CLOCK
+ depends on PTP_1588_CLOCK
---help---
Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -76,7 +76,7 @@ config PTP_1588_CLOCK_PCH
tristate "Intel PCH EG20T as PTP clock"
depends on X86_32 || COMPILE_TEST
depends on HAS_IOMEM && NET
- select PTP_1588_CLOCK
+ imply PTP_1588_CLOCK
help
This driver adds support for using the PCH EG20T as a PTP
clock. The hardware supports time stamping of PTP packets
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7030d7cd3861..38aa8e1906c2 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -191,6 +191,13 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
static int cmos_read_time(struct device *dev, struct rtc_time *t)
{
+ /*
+ * If pm_trace abused the RTC for storage, set the timespec to 0,
+ * which tells the caller that this RTC value is unusable.
+ */
+ if (!pm_trace_rtc_valid())
+ return -EIO;
+
/* REVISIT: if the clock has a "century" register, use
* that instead of the heuristic in mc146818_get_time().
* That'll make Y3K compatibility (year > 2070) easy!
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index dd3f5d7617d2..8354d4dabdad 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -53,58 +53,38 @@ static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
return p;
}
-static void bman_offline_cpu(unsigned int cpu)
+static int bman_offline_cpu(unsigned int cpu)
{
struct bman_portal *p = affine_bportals[cpu];
const struct bm_portal_config *pcfg;
if (!p)
- return;
+ return 0;
pcfg = bman_get_bm_portal_config(p);
if (!pcfg)
- return;
+ return 0;
irq_set_affinity(pcfg->irq, cpumask_of(0));
+ return 0;
}
-static void bman_online_cpu(unsigned int cpu)
+static int bman_online_cpu(unsigned int cpu)
{
struct bman_portal *p = affine_bportals[cpu];
const struct bm_portal_config *pcfg;
if (!p)
- return;
+ return 0;
pcfg = bman_get_bm_portal_config(p);
if (!pcfg)
- return;
+ return 0;
irq_set_affinity(pcfg->irq, cpumask_of(cpu));
+ return 0;
}
-static int bman_hotplug_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- bman_online_cpu(cpu);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- bman_offline_cpu(cpu);
- }
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block bman_hotplug_cpu_notifier = {
- .notifier_call = bman_hotplug_cpu_callback,
-};
-
static int bman_portal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -217,8 +197,14 @@ static int __init bman_portal_driver_register(struct platform_driver *drv)
if (ret < 0)
return ret;
- register_hotcpu_notifier(&bman_hotplug_cpu_notifier);
-
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qbman_portal:online",
+ bman_online_cpu, bman_offline_cpu);
+ if (ret < 0) {
+ pr_err("bman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
return 0;
}
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index c9a9bcb1aea2..adbaa30d3c5a 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -186,7 +186,7 @@ static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
qman_set_sdest(pcfg->channel, cpu);
}
-static void qman_offline_cpu(unsigned int cpu)
+static int qman_offline_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
@@ -199,9 +199,10 @@ static void qman_offline_cpu(unsigned int cpu)
qman_portal_update_sdest(pcfg, 0);
}
}
+ return 0;
}
-static void qman_online_cpu(unsigned int cpu)
+static int qman_online_cpu(unsigned int cpu)
{
struct qman_portal *p;
const struct qm_portal_config *pcfg;
@@ -214,31 +215,9 @@ static void qman_online_cpu(unsigned int cpu)
qman_portal_update_sdest(pcfg, cpu);
}
}
+ return 0;
}
-static int qman_hotplug_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- qman_online_cpu(cpu);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- qman_offline_cpu(cpu);
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block qman_hotplug_cpu_notifier = {
- .notifier_call = qman_hotplug_cpu_callback,
-};
-
static int qman_portal_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -365,8 +344,14 @@ static int __init qman_portal_driver_register(struct platform_driver *drv)
if (ret < 0)
return ret;
- register_hotcpu_notifier(&qman_hotplug_cpu_notifier);
-
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qman_portal:online",
+ qman_online_cpu, qman_offline_cpu);
+ if (ret < 0) {
+ pr_err("qman: failed to register hotplug callbacks.\n");
+ platform_driver_unregister(drv);
+ return ret;
+ }
return 0;
}
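The bman and qman portal conversions above share one template: the notifier-block callbacks become plain online/offline functions returning int, and registration goes through the CPU hotplug state machine. A minimal sketch of that pattern, with illustrative driver names (not part of this patch):

static int foo_online_cpu(unsigned int cpu)
{
	/* bring the per-CPU resource up for @cpu */
	return 0;
}

static int foo_offline_cpu(unsigned int cpu)
{
	/* quiesce the per-CPU resource; a negative return vetoes the unplug */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/* _nocalls: register the callbacks without invoking them now */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"drivers/foo:online",
					foo_online_cpu, foo_offline_cpu);
	return ret < 0 ? ret : 0;
}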
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index c121acc15bfe..d35db16aa43f 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,6 +1,8 @@
menuconfig THUNDERBOLT
tristate "Thunderbolt support for Apple devices"
depends on PCI
+ depends on X86 || COMPILE_TEST
+ select APPLE_PROPERTIES if EFI_STUB && X86
select CRC32
help
Cactus Ridge Thunderbolt Controller driver
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 2b9602c2c355..6392990c984d 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -5,6 +5,7 @@
*/
#include <linux/crc32.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"
@@ -360,6 +361,40 @@ static int tb_drom_parse_entries(struct tb_switch *sw)
}
/**
+ * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
+ */
+static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
+{
+ struct device *dev = &sw->tb->nhi->pdev->dev;
+ int len, res;
+
+ len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
+ if (len < 0 || len < sizeof(struct tb_drom_header))
+ return -EINVAL;
+
+ sw->drom = kmalloc(len, GFP_KERNEL);
+ if (!sw->drom)
+ return -ENOMEM;
+
+ res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
+ len);
+ if (res)
+ goto err;
+
+ *size = ((struct tb_drom_header *)sw->drom)->data_len +
+ TB_DROM_DATA_START;
+ if (*size > len)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(sw->drom);
+ sw->drom = NULL;
+ return -EINVAL;
+}
+
+/**
* tb_drom_read - copy drom to sw->drom and parse it
*/
int tb_drom_read(struct tb_switch *sw)
@@ -374,6 +409,13 @@ int tb_drom_read(struct tb_switch *sw)
if (tb_route(sw) == 0) {
/*
+ * Apple's NHI EFI driver supplies a DROM for the root switch
+ * in a device property. Use it if available.
+ */
+ if (tb_drom_copy_efi(sw, &size) == 0)
+ goto parse;
+
+ /*
* The root switch contains only a dummy drom (header only,
* no entries). Hardcode the configuration here.
*/
@@ -418,6 +460,7 @@ int tb_drom_read(struct tb_switch *sw)
if (res)
goto err;
+parse:
header = (void *) sw->drom;
if (header->data_len + TB_DROM_DATA_START != size) {
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 9840fdecb73b..c6f30b1695a9 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -460,7 +460,7 @@ int tb_switch_resume(struct tb_switch *sw)
tb_sw_warn(sw, "uid read failed\n");
return err;
}
- if (sw->uid != uid) {
+ if (sw != sw->tb->root_switch && sw->uid != uid) {
tb_sw_info(sw,
"changed while suspended (uid %#llx -> %#llx)\n",
sw->uid, uid);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 5dc128a8da83..5dc34653274a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -342,7 +342,7 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
endtime = busy_clock() + vq->busyloop_timeout;
while (vhost_can_busy_poll(vq->dev, endtime) &&
vhost_vq_avail_empty(vq->dev, vq))
- cpu_relax_lowlatency();
+ cpu_relax();
preempt_enable();
r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
out_num, in_num, NULL, NULL);
@@ -533,7 +533,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
while (vhost_can_busy_poll(&net->dev, endtime) &&
!sk_has_rx_data(sk) &&
vhost_vq_avail_empty(&net->dev, vq))
- cpu_relax_lowlatency();
+ cpu_relax();
preempt_enable();
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 37a37c4d04cb..8c4dc1e1f94f 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -118,6 +118,31 @@ static inline bool fb_base_is_valid(void)
return false;
}
+#define efifb_attr_decl(name, fmt) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return sprintf(buf, fmt "\n", (screen_info.lfb_##name)); \
+} \
+static DEVICE_ATTR_RO(name)
+
+efifb_attr_decl(base, "0x%x");
+efifb_attr_decl(linelength, "%u");
+efifb_attr_decl(height, "%u");
+efifb_attr_decl(width, "%u");
+efifb_attr_decl(depth, "%u");
+
+static struct attribute *efifb_attrs[] = {
+ &dev_attr_base.attr,
+ &dev_attr_linelength.attr,
+ &dev_attr_width.attr,
+ &dev_attr_height.attr,
+ &dev_attr_depth.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(efifb);
+
static int efifb_probe(struct platform_device *dev)
{
struct fb_info *info;
@@ -205,14 +230,13 @@ static int efifb_probe(struct platform_device *dev)
} else {
/* We cannot make this fatal. Sometimes this comes from magic
spaces our resource handlers simply don't know about */
- printk(KERN_WARNING
- "efifb: cannot reserve video memory at 0x%lx\n",
+ pr_warn("efifb: cannot reserve video memory at 0x%lx\n",
efifb_fix.smem_start);
}
info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
if (!info) {
- printk(KERN_ERR "efifb: cannot allocate framebuffer\n");
+ pr_err("efifb: cannot allocate framebuffer\n");
err = -ENOMEM;
goto err_release_mem;
}
@@ -230,16 +254,15 @@ static int efifb_probe(struct platform_device *dev)
info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
- printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
- "0x%x @ 0x%lx\n",
+ pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
efifb_fix.smem_len, efifb_fix.smem_start);
err = -EIO;
goto err_release_fb;
}
- printk(KERN_INFO "efifb: framebuffer at 0x%lx, using %dk, total %dk\n",
+ pr_info("efifb: framebuffer at 0x%lx, using %dk, total %dk\n",
efifb_fix.smem_start, size_remap/1024, size_total/1024);
- printk(KERN_INFO "efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
+ pr_info("efifb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
efifb_defined.xres, efifb_defined.yres,
efifb_defined.bits_per_pixel, efifb_fix.line_length,
screen_info.pages);
@@ -247,7 +270,7 @@ static int efifb_probe(struct platform_device *dev)
efifb_defined.xres_virtual = efifb_defined.xres;
efifb_defined.yres_virtual = efifb_fix.smem_len /
efifb_fix.line_length;
- printk(KERN_INFO "efifb: scrolling: redraw\n");
+ pr_info("efifb: scrolling: redraw\n");
efifb_defined.yres_virtual = efifb_defined.yres;
/* some dummy values for timing to make fbset happy */
@@ -265,7 +288,7 @@ static int efifb_probe(struct platform_device *dev)
efifb_defined.transp.offset = screen_info.rsvd_pos;
efifb_defined.transp.length = screen_info.rsvd_size;
- printk(KERN_INFO "efifb: %s: "
+ pr_info("efifb: %s: "
"size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
"Truecolor",
screen_info.rsvd_size,
@@ -285,12 +308,19 @@ static int efifb_probe(struct platform_device *dev)
info->fix = efifb_fix;
info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE;
- if ((err = fb_alloc_cmap(&info->cmap, 256, 0)) < 0) {
- printk(KERN_ERR "efifb: cannot allocate colormap\n");
+ err = sysfs_create_groups(&dev->dev.kobj, efifb_groups);
+ if (err) {
+ pr_err("efifb: cannot add sysfs attrs\n");
goto err_unmap;
}
- if ((err = register_framebuffer(info)) < 0) {
- printk(KERN_ERR "efifb: cannot register framebuffer\n");
+ err = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (err < 0) {
+ pr_err("efifb: cannot allocate colormap\n");
+ goto err_groups;
+ }
+ err = register_framebuffer(info);
+ if (err < 0) {
+ pr_err("efifb: cannot register framebuffer\n");
goto err_fb_dealoc;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
@@ -298,6 +328,8 @@ static int efifb_probe(struct platform_device *dev)
err_fb_dealoc:
fb_dealloc_cmap(&info->cmap);
+err_groups:
+ sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
err_unmap:
iounmap(info->screen_base);
err_release_fb:
@@ -313,6 +345,7 @@ static int efifb_remove(struct platform_device *pdev)
struct fb_info *info = platform_get_drvdata(pdev);
unregister_framebuffer(info);
+ sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
framebuffer_release(info);
return 0;
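For reference, one efifb_attr_decl() invocation from the hunk above expands to roughly the following; this is a hand-written sketch of the expansion, not generated output:

/* Approximate expansion of efifb_attr_decl(width, "%u") */
static ssize_t width_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", (screen_info.lfb_width));
}
static DEVICE_ATTR_RO(width);

The resulting read-only attributes (base, linelength, width, height, depth) should then appear in the efifb platform device's sysfs directory once sysfs_create_groups() succeeds.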
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index b55981f88a08..529182d7d8a7 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -374,7 +374,7 @@ void octeon_wdt_nmi_stage3(u64 reg[32])
octeon_wdt_write_string("*** Chip soft reset soon ***\r\n");
}
-static void octeon_wdt_disable_interrupt(int cpu)
+static int octeon_wdt_cpu_pre_down(unsigned int cpu)
{
unsigned int core;
unsigned int irq;
@@ -392,9 +392,10 @@ static void octeon_wdt_disable_interrupt(int cpu)
cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);
free_irq(irq, octeon_wdt_poke_irq);
+ return 0;
}
-static void octeon_wdt_setup_interrupt(int cpu)
+static int octeon_wdt_cpu_online(unsigned int cpu)
{
unsigned int core;
unsigned int irq;
@@ -424,25 +425,8 @@ static void octeon_wdt_setup_interrupt(int cpu)
ciu_wdog.s.len = timeout_cnt;
ciu_wdog.s.mode = 3; /* 3 = Interrupt + NMI + Soft-Reset */
cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64);
-}
-static int octeon_wdt_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
- octeon_wdt_disable_interrupt(cpu);
- break;
- case CPU_ONLINE:
- case CPU_DOWN_FAILED:
- octeon_wdt_setup_interrupt(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+ return 0;
}
static int octeon_wdt_ping(struct watchdog_device __always_unused *wdog)
@@ -531,10 +515,6 @@ static int octeon_wdt_stop(struct watchdog_device *wdog)
return 0;
}
-static struct notifier_block octeon_wdt_cpu_notifier = {
- .notifier_call = octeon_wdt_cpu_callback,
-};
-
static const struct watchdog_info octeon_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
.identity = "OCTEON",
@@ -553,6 +533,7 @@ static struct watchdog_device octeon_wdt = {
.ops = &octeon_wdt_ops,
};
+static enum cpuhp_state octeon_wdt_online;
/**
* Module/ driver initialization.
*
@@ -562,7 +543,6 @@ static int __init octeon_wdt_init(void)
{
int i;
int ret;
- int cpu;
u64 *ptr;
/*
@@ -610,14 +590,16 @@ static int __init octeon_wdt_init(void)
cpumask_clear(&irq_enabled_cpus);
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- octeon_wdt_setup_interrupt(cpu);
-
- __register_hotcpu_notifier(&octeon_wdt_cpu_notifier);
- cpu_notifier_register_done();
-
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "watchdog/octeon:online",
+ octeon_wdt_cpu_online, octeon_wdt_cpu_pre_down);
+ if (ret < 0)
+ goto err;
+ octeon_wdt_online = ret;
return 0;
+err:
+ cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0);
+ watchdog_unregister_device(&octeon_wdt);
+ return ret;
}
/**
@@ -625,22 +607,8 @@ static int __init octeon_wdt_init(void)
*/
static void __exit octeon_wdt_cleanup(void)
{
- int cpu;
-
watchdog_unregister_device(&octeon_wdt);
-
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier);
-
- for_each_online_cpu(cpu) {
- int core = cpu2core(cpu);
- /* Disable the watchdog */
- cvmx_write_csr(CVMX_CIU_WDOGX(core), 0);
- /* Free the interrupt handler */
- free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq);
- }
-
- cpu_notifier_register_done();
+ cpuhp_remove_state(octeon_wdt_online);
/*
* Disable the boot-bus memory, the code it points to is soon
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 9ecfcdcdd6d6..9ad622ab05dc 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1256,7 +1256,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
irq_enter();
#ifdef CONFIG_X86
- exit_idle();
inc_irq_stat(irq_hv_callback_count);
#endif
diff --git a/fs/buffer.c b/fs/buffer.c
index 1104ce8b4536..d21771fcf7d3 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3445,7 +3445,7 @@ void free_buffer_head(struct buffer_head *bh)
}
EXPORT_SYMBOL(free_buffer_head);
-static void buffer_exit_cpu(int cpu)
+static int buffer_exit_cpu_dead(unsigned int cpu)
{
int i;
struct bh_lru *b = &per_cpu(bh_lrus, cpu);
@@ -3456,14 +3456,7 @@ static void buffer_exit_cpu(int cpu)
}
this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
per_cpu(bh_accounting, cpu).nr = 0;
-}
-
-static int buffer_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
- buffer_exit_cpu((unsigned long)hcpu);
- return NOTIFY_OK;
+ return 0;
}
/**
@@ -3513,6 +3506,7 @@ EXPORT_SYMBOL(bh_submit_read);
void __init buffer_init(void)
{
unsigned long nrpages;
+ int ret;
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
@@ -3525,5 +3519,7 @@ void __init buffer_init(void)
*/
nrpages = (nr_free_buffer_pages() * 10) / 100;
max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
- hotcpu_notifier(buffer_cpu_notify, 0);
+ ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
+ NULL, buffer_exit_cpu_dead);
+ WARN_ON(ret < 0);
}
diff --git a/fs/exec.c b/fs/exec.c
index 4e497b9ee71e..923c57d96899 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1169,8 +1169,10 @@ no_thread_group:
/* we have changed execution domain */
tsk->exit_signal = SIGCHLD;
+#ifdef CONFIG_POSIX_TIMERS
exit_itimers(sig);
flush_itimer_signals();
+#endif
if (atomic_read(&oldsighand->count) != 1) {
struct sighand_struct *newsighand;
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
index fe386fc6e85e..6bb8cd45f53b 100644
--- a/include/asm-generic/cputime_jiffies.h
+++ b/include/asm-generic/cputime_jiffies.h
@@ -7,7 +7,6 @@ typedef unsigned long __nocast cputime_t;
#define cputime_one_jiffy jiffies_to_cputime(1)
#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
-#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
typedef u64 __nocast cputime64_t;
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index a84e28e0c634..4e3b18e559b1 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -34,7 +34,6 @@ typedef u64 __nocast cputime64_t;
*/
#define cputime_to_jiffies(__ct) \
cputime_div(__ct, NSEC_PER_SEC / HZ)
-#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__jif) \
(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
#define cputime64_to_jiffies64(__ct) \
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
deleted file mode 100644
index c54829d3de37..000000000000
--- a/include/asm-generic/mutex-dec.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * include/asm-generic/mutex-dec.h
- *
- * Generic implementation of the mutex fastpath, based on atomic
- * decrement/increment.
- */
-#ifndef _ASM_GENERIC_MUTEX_DEC_H
-#define _ASM_GENERIC_MUTEX_DEC_H
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function MUST leave the value lower than
- * 1 even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_dec_return_acquire(count) < 0))
- fail_fn(count);
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(atomic_dec_return_acquire(count) < 0))
- return -1;
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the count from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than 1.
- *
- * If the implementation sets it to a value of lower than 1, then the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_inc_return_release(count) <= 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 1
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: fallback function
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- *
- * If the architecture has no effective trylock variant, it should call the
- * <fail_fn> spinlock-based trylock variant unconditionally.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1))
- return 1;
- return 0;
-}
-
-#endif
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
deleted file mode 100644
index 61069ed334e2..000000000000
--- a/include/asm-generic/mutex-null.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * include/asm-generic/mutex-null.h
- *
- * Generic implementation of the mutex fastpath, based on NOP :-)
- *
- * This is used by the mutex-debugging infrastructure, but it can also
- * be used by architectures that (for whatever reason) want to use the
- * spinlock based slowpath.
- */
-#ifndef _ASM_GENERIC_MUTEX_NULL_H
-#define _ASM_GENERIC_MUTEX_NULL_H
-
-#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
-#define __mutex_fastpath_lock_retval(count) (-1)
-#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
-#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
-#define __mutex_slowpath_needs_to_unlock() 1
-
-#endif
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
deleted file mode 100644
index 3269ec4e195f..000000000000
--- a/include/asm-generic/mutex-xchg.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * include/asm-generic/mutex-xchg.h
- *
- * Generic implementation of the mutex fastpath, based on xchg().
- *
- * NOTE: An xchg based implementation might be less optimal than an atomic
- * decrement/increment based implementation. If your architecture
- * has a reasonable atomic dec/inc then you should probably use
- * asm-generic/mutex-dec.h instead, or you could open-code an
- * optimized version in asm/mutex.h.
- */
-#ifndef _ASM_GENERIC_MUTEX_XCHG_H
-#define _ASM_GENERIC_MUTEX_XCHG_H
-
-/**
- * __mutex_fastpath_lock - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
- *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function MUST leave the value lower than 1
- * even when the "1" assertion wasn't true.
- */
-static inline void
-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_xchg(count, 0) != 1))
- /*
- * We failed to acquire the lock, so mark it contended
- * to ensure that any waiting tasks are woken up by the
- * unlock slow path.
- */
- if (likely(atomic_xchg_acquire(count, -1) != 1))
- fail_fn(count);
-}
-
-/**
- * __mutex_fastpath_lock_retval - try to take the lock by moving the count
- * from 1 to a 0 value
- * @count: pointer of type atomic_t
- *
- * Change the count from 1 to a value lower than 1. This function returns 0
- * if the fastpath succeeds, or -1 otherwise.
- */
-static inline int
-__mutex_fastpath_lock_retval(atomic_t *count)
-{
- if (unlikely(atomic_xchg_acquire(count, 0) != 1))
- if (likely(atomic_xchg(count, -1) != 1))
- return -1;
- return 0;
-}
-
-/**
- * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
- * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 0
- *
- * try to promote the mutex from 0 to 1. if it wasn't 0, call <function>
- * In the failure case, this function is allowed to either set the value to
- * 1, or to set it to a value lower than one.
- * If the implementation sets it to a value of lower than one, the
- * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
- * to return 0 otherwise.
- */
-static inline void
-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
-{
- if (unlikely(atomic_xchg_release(count, 1) != 0))
- fail_fn(count);
-}
-
-#define __mutex_slowpath_needs_to_unlock() 0
-
-/**
- * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
- *
- * @count: pointer of type atomic_t
- * @fail_fn: spinlock based trylock implementation
- *
- * Change the count from 1 to a value lower than 1, and return 0 (failure)
- * if it wasn't 1 originally, or return 1 (success) otherwise. This function
- * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
- * Additionally, if the value was < 0 originally, this function must not leave
- * it to 0 on failure.
- *
- * If the architecture has no effective trylock variant, it should call the
- * <fail_fn> spinlock-based trylock variant unconditionally.
- */
-static inline int
-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
-{
- int prev;
-
- if (atomic_read(count) != 1)
- return 0;
-
- prev = atomic_xchg_acquire(count, 0);
- if (unlikely(prev < 0)) {
- /*
- * The lock was marked contended so we must restore that
- * state. If while doing so we get back a prev value of 1
- * then we just own it.
- *
- * [ In the rare case of the mutex going to 1, to 0, to -1
- * and then back to 0 in this few-instructions window,
- * this has the potential to trigger the slowpath for the
- * owner's unlock path needlessly, but that's not a problem
- * in practice. ]
- */
- prev = atomic_xchg_acquire(count, prev);
- if (prev < 0)
- prev = 0;
- }
-
- return prev;
-}
-
-#endif
diff --git a/include/asm-generic/mutex.h b/include/asm-generic/mutex.h
deleted file mode 100644
index fe91ab502793..000000000000
--- a/include/asm-generic/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_GENERIC_MUTEX_H
-#define __ASM_GENERIC_MUTEX_H
-/*
- * Pull in the generic implementation for the mutex fastpath,
- * which is a reasonable default on many architectures.
- */
-
-#include <asm-generic/mutex-dec.h>
-#endif /* __ASM_GENERIC_MUTEX_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index c4f8fd2fd384..41b95d82a185 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -558,10 +558,9 @@ static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
* track_pfn_insert is called when a _new_ single pfn is established
* by vm_insert_pfn().
*/
-static inline int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn)
+static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn)
{
- return 0;
}
/*
@@ -593,8 +592,8 @@ static inline void untrack_pfn_moved(struct vm_area_struct *vma)
extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr,
unsigned long size);
-extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
- pfn_t pfn);
+extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+ pfn_t pfn);
extern int track_pfn_copy(struct vm_area_struct *vma);
extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d2185b4405ae..5b36974ed60a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -488,6 +488,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPCV2_SUPPORT 0x00000040
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
+#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
extern bool osc_sb_apei_support_acked;
extern bool osc_pc_lpi_support_confirmed;
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 9d8031257a90..c70aac13244a 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -10,7 +10,12 @@ enum alarmtimer_type {
ALARM_REALTIME,
ALARM_BOOTTIME,
+ /* Supported types end here */
ALARM_NUMTYPE,
+
+ /* Used for tracing information. No usable types. */
+ ALARM_REALTIME_FREEZER,
+ ALARM_BOOTTIME_FREEZER,
};
enum alarmtimer_restart {
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 292d6a10b0c2..baff2e8fc8a8 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -121,4 +121,21 @@ static inline enum bug_trap_type report_bug(unsigned long bug_addr,
}
#endif /* CONFIG_GENERIC_BUG */
+
+/*
+ * Since detected data corruption should stop operation on the affected
+ * structures, this macro makes the enclosing bool-returning function
+ * return false when the corruption condition is found.
+ */
+#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \
+ do { \
+ if (unlikely(condition)) { \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \
+ pr_err(fmt, ##__VA_ARGS__); \
+ BUG(); \
+ } else \
+ WARN(1, fmt, ##__VA_ARGS__); \
+ return false; \
+ } \
+ } while (0)
+
#endif /* _LINUX_BUG_H */
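Because CHECK_DATA_CORRUPTION() is a do/while statement containing a bare "return false", it is meant to be invoked from a bool-returning validation helper. A hedged usage sketch, with illustrative structure and message names:

struct foo_entry {
	struct foo_entry *next;
};

static bool foo_entry_valid(struct foo_entry *entry)
{
	/* returns false from this function if the condition fires */
	CHECK_DATA_CORRUPTION(entry->next == NULL,
			      "foo: corrupted entry %p (next is NULL)\n",
			      entry);

	return true;	/* reached only when no corruption was detected */
}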
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index a951fd10aaaa..6a524bf6a06d 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -18,6 +18,7 @@ enum cache_type {
/**
* struct cacheinfo - represent a cache leaf node
+ * @id: This cache's id. It is unique among caches with the same (type, level).
* @type: type of the cache - data, inst or unified
* @level: represents the hierarchy in the multi-level cache
* @coherency_line_size: size of each cache line usually representing
@@ -44,6 +45,7 @@ enum cache_type {
* keeping, the remaining members form the core properties of the cache
*/
struct cacheinfo {
+ unsigned int id;
enum cache_type type;
unsigned int level;
unsigned int coherency_line_size;
@@ -61,6 +63,7 @@ struct cacheinfo {
#define CACHE_WRITE_ALLOCATE BIT(3)
#define CACHE_ALLOCATE_POLICY_MASK \
(CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
+#define CACHE_ID BIT(4)
struct device_node *of_node;
bool disable_sysfs;
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 08398182f56e..65602d395a52 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -169,7 +169,10 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
* @mult: cycle to nanosecond multiplier
* @shift: cycle to nanosecond divisor (power of two)
*
- * Converts cycles to nanoseconds, using the given mult and shift.
+ * Converts clocksource cycles to nanoseconds, using the given @mult and @shift.
+ * The code is optimized for performance and is not intended to work
+ * with absolute clocksource cycles (as those will easily overflow),
+ * but is only intended to be used with relative (delta) clocksource cycles.
*
* XXX - This could use some mult_lxl_ll() asm optimization
*/
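The reworded comment stresses that only cycle deltas are safe to convert, since an absolute counter value can overflow the 64-bit (cycles * mult) product. A small illustrative sketch under that assumption:

static s64 foo_elapsed_ns(struct clocksource *cs, u64 start, u64 end)
{
	u64 delta = (end - start) & cs->mask;	/* wrap-safe cycle delta */

	return clocksource_cyc2ns(delta, cs->mult, cs->shift);
}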
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index afe641c02dca..22acee76cf4c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -16,9 +16,11 @@ enum cpuhp_state {
CPUHP_PERF_SUPERH,
CPUHP_X86_HPET_DEAD,
CPUHP_X86_APB_DEAD,
+ CPUHP_X86_MCE_DEAD,
CPUHP_VIRT_NET_DEAD,
CPUHP_SLUB_DEAD,
CPUHP_MM_WRITEBACK_DEAD,
+ CPUHP_MM_VMSTAT_DEAD,
CPUHP_SOFTIRQ_DEAD,
CPUHP_NET_MVNETA_DEAD,
CPUHP_CPUIDLE_DEAD,
@@ -30,6 +32,15 @@ enum cpuhp_state {
CPUHP_ACPI_CPUDRV_DEAD,
CPUHP_S390_PFAULT_DEAD,
CPUHP_BLK_MQ_DEAD,
+ CPUHP_FS_BUFF_DEAD,
+ CPUHP_PRINTK_DEAD,
+ CPUHP_MM_MEMCQ_DEAD,
+ CPUHP_PERCPU_CNT_DEAD,
+ CPUHP_RADIX_DEAD,
+ CPUHP_PAGE_ALLOC_DEAD,
+ CPUHP_NET_DEV_DEAD,
+ CPUHP_PCI_XGENE_DEAD,
+ CPUHP_IOMMU_INTEL_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -49,6 +60,16 @@ enum cpuhp_state {
CPUHP_ARM_SHMOBILE_SCU_PREPARE,
CPUHP_SH_SH3X_PREPARE,
CPUHP_BLK_MQ_PREPARE,
+ CPUHP_NET_FLOW_PREPARE,
+ CPUHP_TOPOLOGY_PREPARE,
+ CPUHP_NET_IUCV_PREPARE,
+ CPUHP_ARM_BL_PREPARE,
+ CPUHP_TRACE_RB_PREPARE,
+ CPUHP_MM_ZS_PREPARE,
+ CPUHP_MM_ZSWP_MEM_PREPARE,
+ CPUHP_MM_ZSWP_POOL_PREPARE,
+ CPUHP_KVM_PPC_BOOK3S_PREPARE,
+ CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_NOTF_ERR_INJ_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 2d089487d2da..a07a476178cd 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -443,6 +443,22 @@ typedef struct {
#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
+typedef struct {
+ u32 version;
+ u32 get;
+ u32 set;
+ u32 del;
+ u32 get_all;
+} apple_properties_protocol_32_t;
+
+typedef struct {
+ u64 version;
+ u64 get;
+ u64 set;
+ u64 del;
+ u64 get_all;
+} apple_properties_protocol_64_t;
+
/*
* Types and defines for EFI ResetSystem
*/
@@ -589,8 +605,10 @@ void efi_native_runtime_setup(void);
#define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
#define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+#define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61)
#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
/*
* This GUID is used to pass to the kernel proper the struct screen_info
@@ -599,6 +617,7 @@ void efi_native_runtime_setup(void);
*/
#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
#define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
+#define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
typedef struct {
efi_guid_t guid;
@@ -872,6 +891,7 @@ extern struct efi {
unsigned long esrt; /* ESRT table */
unsigned long properties_table; /* properties table */
unsigned long mem_attr_table; /* memory attributes table */
+ unsigned long rng_seed; /* UEFI firmware random seed */
efi_get_time_t *get_time;
efi_set_time_t *set_time;
efi_get_wakeup_time_t *get_wakeup_time;
@@ -1145,6 +1165,26 @@ struct efi_generic_dev_path {
u16 length;
} __attribute ((packed));
+struct efi_dev_path {
+ u8 type; /* can be replaced with unnamed */
+ u8 sub_type; /* struct efi_generic_dev_path; */
+ u16 length; /* once we've moved to -std=c11 */
+ union {
+ struct {
+ u32 hid;
+ u32 uid;
+ } acpi;
+ struct {
+ u8 fn;
+ u8 dev;
+ } pci;
+ };
+} __attribute ((packed));
+
+#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER)
+struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len);
+#endif
+
static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
{
*npages = PFN_UP(*addr + (*npages<<EFI_PAGE_SHIFT)) - PFN_DOWN(*addr);
@@ -1493,4 +1533,10 @@ efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
struct efi_boot_memmap *map,
void *priv,
efi_exit_boot_map_processing priv_func);
+
+struct linux_efi_random_seed {
+ u32 size;
+ u8 bits[];
+};
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 72f0721f75e7..53144e78a369 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -232,6 +232,18 @@ struct irq_affinity_notify {
void (*release)(struct kref *ref);
};
+/**
+ * struct irq_affinity - Description for automatic irq affinity assignments
+ * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
+ * the MSI(-X) vector space
+ * @post_vectors: Don't apply affinity to @post_vectors at end of
+ * the MSI(-X) vector space
+ */
+struct irq_affinity {
+ int pre_vectors;
+ int post_vectors;
+};
+
#if defined(CONFIG_SMP)
extern cpumask_var_t irq_default_affinity;
@@ -278,8 +290,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
-int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
+struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -313,13 +325,13 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
}
static inline struct cpumask *
-irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
{
return NULL;
}
static inline int
-irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
{
return maxvec;
}
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index b7e34313cdfe..5118d3a0c9ca 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -239,7 +239,7 @@
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT 24
-#define GITS_CBASER_VALID (1UL << 63)
+#define GITS_CBASER_VALID (1ULL << 63)
#define GITS_CBASER_SHAREABILITY_SHIFT (10)
#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53)
@@ -265,7 +265,7 @@
#define GITS_BASER_NR_REGS 8
-#define GITS_BASER_VALID (1UL << 63)
+#define GITS_BASER_VALID (1ULL << 63)
#define GITS_BASER_INDIRECT (1ULL << 62)
#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59)
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 44fda64ad434..00f776816aa3 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -78,8 +78,8 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
return kstat_cpu(cpu).irqs_sum;
}
-extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
-extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
+extern void account_user_time(struct task_struct *, cputime_t);
+extern void account_system_time(struct task_struct *, int, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 01c0b9cc3915..81ba3ba641ba 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -224,7 +224,6 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
- unsigned char fpu_counter;
struct swait_queue_head wq;
struct pid *pid;
int sigset_active;
@@ -645,6 +644,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, int offset, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
diff --git a/include/linux/list.h b/include/linux/list.h
index 5809e9a2de5b..d1039ecaf94f 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -28,27 +28,42 @@ static inline void INIT_LIST_HEAD(struct list_head *list)
list->prev = list;
}
+#ifdef CONFIG_DEBUG_LIST
+extern bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next);
+extern bool __list_del_entry_valid(struct list_head *entry);
+#else
+static inline bool __list_add_valid(struct list_head *new,
+ struct list_head *prev,
+ struct list_head *next)
+{
+ return true;
+}
+static inline bool __list_del_entry_valid(struct list_head *entry)
+{
+ return true;
+}
+#endif
+
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-#ifndef CONFIG_DEBUG_LIST
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
+ if (!__list_add_valid(new, prev, next))
+ return;
+
next->prev = new;
new->next = next;
new->prev = prev;
WRITE_ONCE(prev->next, new);
}
-#else
-extern void __list_add(struct list_head *new,
- struct list_head *prev,
- struct list_head *next);
-#endif
/**
* list_add - add a new entry
@@ -96,22 +111,20 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
-#ifndef CONFIG_DEBUG_LIST
static inline void __list_del_entry(struct list_head *entry)
{
+ if (!__list_del_entry_valid(entry))
+ return;
+
__list_del(entry->prev, entry->next);
}
static inline void list_del(struct list_head *entry)
{
- __list_del(entry->prev, entry->next);
+ __list_del_entry(entry);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
-#else
-extern void __list_del_entry(struct list_head *entry);
-extern void list_del(struct list_head *entry);
-#endif
/**
* list_replace - replace old entry by new one
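With the hunk above, the CONFIG_DEBUG_LIST checks move out of line into __list_add_valid()/__list_del_entry_valid(). As a hedged sketch only (not this patch's actual lib/list_debug.c body), such a check can be built on CHECK_DATA_CORRUPTION() like this:

bool __list_add_valid(struct list_head *new, struct list_head *prev,
		      struct list_head *next)
{
	CHECK_DATA_CORRUPTION(next->prev != prev,
		"list_add corruption. next->prev should be prev (%p), but was %p.\n",
		prev, next->prev);
	CHECK_DATA_CORRUPTION(prev->next != next,
		"list_add corruption. prev->next should be next (%p), but was %p.\n",
		next, prev->next);
	CHECK_DATA_CORRUPTION(new == prev || new == next,
		"list_add double add: new=%p, prev=%p, next=%p.\n",
		new, prev, next);

	return true;
}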
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index a585b4b5fa0e..0661af17a758 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -16,6 +16,7 @@
#include <asm/mc146818rtc.h> /* register access macros */
#include <linux/bcd.h>
#include <linux/delay.h>
+#include <linux/pm-trace.h>
#ifdef __KERNEL__
#include <linux/spinlock.h> /* spinlock_t */
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
deleted file mode 100644
index 4ac8b1977b73..000000000000
--- a/include/linux/mutex-debug.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef __LINUX_MUTEX_DEBUG_H
-#define __LINUX_MUTEX_DEBUG_H
-
-#include <linux/linkage.h>
-#include <linux/lockdep.h>
-#include <linux/debug_locks.h>
-
-/*
- * Mutexes - debugging helpers:
- */
-
-#define __DEBUG_MUTEX_INITIALIZER(lockname) \
- , .magic = &lockname
-
-#define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
-} while (0)
-
-extern void mutex_destroy(struct mutex *lock);
-
-#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 2cb7531e7d7a..b97870f2debd 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -18,6 +18,7 @@
#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/osq_lock.h>
+#include <linux/debug_locks.h>
/*
* Simple, straightforward mutexes with strict semantics:
@@ -48,16 +49,12 @@
* locks and tasks (and only those tasks)
*/
struct mutex {
- /* 1: unlocked, 0: locked, negative: locked, possible waiters */
- atomic_t count;
+ atomic_long_t owner;
spinlock_t wait_lock;
- struct list_head wait_list;
-#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
- struct task_struct *owner;
-#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
#endif
+ struct list_head wait_list;
#ifdef CONFIG_DEBUG_MUTEXES
void *magic;
#endif
@@ -66,6 +63,11 @@ struct mutex {
#endif
};
+static inline struct task_struct *__mutex_owner(struct mutex *lock)
+{
+ return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x03);
+}
+
/*
* This is the control structure for tasks blocked on mutex,
* which resides on the blocked task's kernel stack:
@@ -79,9 +81,20 @@ struct mutex_waiter {
};
#ifdef CONFIG_DEBUG_MUTEXES
-# include <linux/mutex-debug.h>
+
+#define __DEBUG_MUTEX_INITIALIZER(lockname) \
+ , .magic = &lockname
+
+extern void mutex_destroy(struct mutex *lock);
+
#else
+
# define __DEBUG_MUTEX_INITIALIZER(lockname)
+
+static inline void mutex_destroy(struct mutex *lock) {}
+
+#endif
+
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
@@ -90,14 +103,12 @@ struct mutex_waiter {
*
* It is not allowed to initialize an already locked mutex.
*/
-# define mutex_init(mutex) \
-do { \
- static struct lock_class_key __key; \
- \
- __mutex_init((mutex), #mutex, &__key); \
+#define mutex_init(mutex) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __mutex_init((mutex), #mutex, &__key); \
} while (0)
-static inline void mutex_destroy(struct mutex *lock) {}
-#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -107,7 +118,7 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif
#define __MUTEX_INITIALIZER(lockname) \
- { .count = ATOMIC_INIT(1) \
+ { .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
__DEBUG_MUTEX_INITIALIZER(lockname) \
@@ -127,7 +138,10 @@ extern void __mutex_init(struct mutex *lock, const char *name,
*/
static inline int mutex_is_locked(struct mutex *lock)
{
- return atomic_read(&lock->count) != 1;
+ /*
+ * XXX think about spin_is_locked
+ */
+ return __mutex_owner(lock) != NULL;
}
/*
@@ -175,4 +189,35 @@ extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+/*
+ * These values are chosen such that FAIL and SUCCESS match the
+ * values of the regular mutex_trylock().
+ */
+enum mutex_trylock_recursive_enum {
+ MUTEX_TRYLOCK_FAILED = 0,
+ MUTEX_TRYLOCK_SUCCESS = 1,
+ MUTEX_TRYLOCK_RECURSIVE,
+};
+
+/**
+ * mutex_trylock_recursive - trylock variant that allows recursive locking
+ * @lock: mutex to be locked
+ *
+ * This function should not be used, _ever_. It is purely for hysterical GEM
+ * raisins, and once those are gone this will be removed.
+ *
+ * Returns:
+ * MUTEX_TRYLOCK_FAILED - trylock failed,
+ * MUTEX_TRYLOCK_SUCCESS - lock acquired,
+ * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
+ */
+static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock)
+{
+ if (unlikely(__mutex_owner(lock) == current))
+ return MUTEX_TRYLOCK_RECURSIVE;
+
+ return mutex_trylock(lock);
+}
+
#endif /* __LINUX_MUTEX_H */
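In the reworked mutex above, the owner word packs the owning task_struct pointer together with internal state flags in the low bits, which is why __mutex_owner() masks with ~0x03. A hedged caller sketch for mutex_trylock_recursive(); foo_do_work() and do_work() are illustrative only:

static void foo_do_work(struct mutex *lock)
{
	switch (mutex_trylock_recursive(lock)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		/* we already hold the lock: do the work, do not unlock */
		do_work();
		break;
	case MUTEX_TRYLOCK_SUCCESS:
		do_work();
		mutex_unlock(lock);
		break;
	case MUTEX_TRYLOCK_FAILED:
		/* somebody else holds it */
		break;
	}
}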
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4e6144d4a860..e2d1a124216a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -244,6 +244,7 @@ struct pci_cap_saved_state {
struct pci_cap_saved_data cap;
};
+struct irq_affinity;
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
@@ -332,7 +333,6 @@ struct pci_dev {
* directly, use the values stored here. They might be different!
*/
unsigned int irq;
- struct cpumask *irq_affinity;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
bool match_driver; /* Skip attaching driver */
@@ -1327,8 +1327,10 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev,
return rc;
return 0;
}
-int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
- unsigned int max_vecs, unsigned int flags);
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *affd);
+
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
@@ -1356,14 +1358,17 @@ static inline int pci_enable_msix_range(struct pci_dev *dev,
static inline int pci_enable_msix_exact(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
{ return -ENOSYS; }
-static inline int pci_alloc_irq_vectors(struct pci_dev *dev,
- unsigned int min_vecs, unsigned int max_vecs,
- unsigned int flags)
+
+static inline int
+pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags,
+ const struct irq_affinity *aff_desc)
{
if (min_vecs > 1)
return -EINVAL;
return 1;
}
+
static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
}
@@ -1381,6 +1386,14 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
}
#endif
+static inline int
+pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+ unsigned int max_vecs, unsigned int flags)
+{
+ return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags,
+ NULL);
+}
+
#ifdef CONFIG_PCIEPORTBUS
extern bool pcie_ports_disabled;
extern bool pcie_ports_auto;
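A hedged sketch of how a driver might use the new affinity-aware allocator, keeping one pre-vector (e.g. an admin interrupt) out of the automatic spreading; device and queue names are illustrative:

static int foo_setup_irqs(struct pci_dev *pdev, unsigned int nr_queues)
{
	struct irq_affinity affd = {
		.pre_vectors = 1,	/* vector 0: admin/config interrupt */
	};
	int nvec;

	nvec = pci_alloc_irq_vectors_affinity(pdev, 2, nr_queues + 1,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
	if (nvec < 0)
		return nvec;

	/* vectors 1..nvec-1 carry the spread per-queue interrupts */
	return 0;
}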
diff --git a/include/linux/pm-trace.h b/include/linux/pm-trace.h
index ecbde7a5548e..7b78793f07d7 100644
--- a/include/linux/pm-trace.h
+++ b/include/linux/pm-trace.h
@@ -1,11 +1,17 @@
#ifndef PM_TRACE_H
#define PM_TRACE_H
+#include <linux/types.h>
#ifdef CONFIG_PM_TRACE
#include <asm/pm-trace.h>
-#include <linux/types.h>
extern int pm_trace_enabled;
+extern bool pm_trace_rtc_abused;
+
+static inline bool pm_trace_rtc_valid(void)
+{
+ return !pm_trace_rtc_abused;
+}
static inline int pm_trace_is_enabled(void)
{
@@ -24,6 +30,7 @@ extern int show_trace_dev_match(char *buf, size_t size);
#else
+static inline bool pm_trace_rtc_valid(void) { return true; }
static inline int pm_trace_is_enabled(void) { return 0; }
#define TRACE_DEVICE(dev) do { } while (0)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 75e4e30677f1..7eeceac52dea 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -65,19 +65,24 @@
/*
* Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
+ *
+ * in_irq() - We're in (hard) IRQ context
+ * in_softirq() - We have BH disabled, or are processing softirqs
+ * in_interrupt()	- We're in NMI, IRQ or SoftIRQ context, or have BH disabled
+ * in_serving_softirq() - We're in softirq context
+ * in_nmi() - We're in NMI context
+ * in_task() - We're in task context
+ *
+ * Note: due to the BH-disabled confusion, in_softirq() and in_interrupt()
+ * really should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_nmi() (preempt_count() & NMI_MASK)
+#define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
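The new in_task() helper is the preferred test going forward. A minimal, hedged illustration of the kind of decision it supports:

/* Sketch: in_task() is true only in plain process context */
static gfp_t foo_gfp_flags(void)
{
	return in_task() ? GFP_KERNEL : GFP_ATOMIC;
}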
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index b76d47aba564..a026bfd089db 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -130,30 +130,6 @@ struct ptp_clock_info {
struct ptp_clock;
-/**
- * ptp_clock_register() - register a PTP hardware clock driver
- *
- * @info: Structure describing the new clock.
- * @parent: Pointer to the parent device of the new clock.
- *
- * Returns a valid pointer on success or PTR_ERR on failure. If PHC
- * support is missing at the configuration level, this function
- * returns NULL, and drivers are expected to gracefully handle that
- * case separately.
- */
-
-extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
- struct device *parent);
-
-/**
- * ptp_clock_unregister() - unregister a PTP hardware clock driver
- *
- * @ptp: The clock to remove from service.
- */
-
-extern int ptp_clock_unregister(struct ptp_clock *ptp);
-
-
enum ptp_clock_events {
PTP_CLOCK_ALARM,
PTP_CLOCK_EXTTS,
@@ -179,6 +155,31 @@ struct ptp_clock_event {
};
};
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
+/**
+ * ptp_clock_register() - register a PTP hardware clock driver
+ *
+ * @info: Structure describing the new clock.
+ * @parent: Pointer to the parent device of the new clock.
+ *
+ * Returns a valid pointer on success or PTR_ERR on failure. If PHC
+ * support is missing at the configuration level, this function
+ * returns NULL, and drivers are expected to gracefully handle that
+ * case separately.
+ */
+
+extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ struct device *parent);
+
+/**
+ * ptp_clock_unregister() - unregister a PTP hardware clock driver
+ *
+ * @ptp: The clock to remove from service.
+ */
+
+extern int ptp_clock_unregister(struct ptp_clock *ptp);
+
/**
* ptp_clock_event() - notify the PTP layer about an event
*
@@ -210,4 +211,20 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
int ptp_find_pin(struct ptp_clock *ptp,
enum ptp_pin_function func, unsigned int chan);
+#else
+static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
+ struct device *parent)
+{ return NULL; }
+static inline int ptp_clock_unregister(struct ptp_clock *ptp)
+{ return 0; }
+static inline void ptp_clock_event(struct ptp_clock *ptp,
+ struct ptp_clock_event *event)
+{ }
+static inline int ptp_clock_index(struct ptp_clock *ptp)
+{ return -1; }
+static inline int ptp_find_pin(struct ptp_clock *ptp,
+ enum ptp_pin_function func, unsigned int chan)
+{ return -1; }
+#endif
+
#endif
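The kernel-doc above spells out three possible outcomes for ptp_clock_register(): a valid pointer, an ERR_PTR-encoded error, or NULL when PHC support is compiled out (now also covered by the new stubs). A hedged caller sketch; struct foo_priv and its members are illustrative:

struct foo_priv {
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
};

static int foo_register_phc(struct foo_priv *priv, struct device *dev)
{
	priv->ptp_clock = ptp_clock_register(&priv->ptp_info, dev);
	if (IS_ERR(priv->ptp_clock)) {
		int err = PTR_ERR(priv->ptp_clock);

		priv->ptp_clock = NULL;
		return err;
	}
	if (!priv->ptp_clock)
		dev_info(dev, "PHC support not built in, continuing without a clock\n");

	return 0;
}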
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 8beb98dcf14f..4f7a9561b8c4 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -45,19 +45,17 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
-#ifndef CONFIG_DEBUG_LIST
static inline void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
+ if (!__list_add_valid(new, prev, next))
+ return;
+
new->next = next;
new->prev = prev;
rcu_assign_pointer(list_next_rcu(prev), new);
next->prev = new;
}
-#else
-void __list_add_rcu(struct list_head *new,
- struct list_head *prev, struct list_head *next);
-#endif
/**
* list_add_rcu - add a new entry to rcu-protected list
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 4acc552e9279..b6d4568795a7 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -198,4 +198,10 @@ enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
};
+#ifdef CONFIG_RING_BUFFER
+int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
+#else
+#define trace_rb_cpu_prepare NULL
+#endif
+
#endif /* _LINUX_RING_BUFFER_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a3d338e15b17..6b3e6ef98d7c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -262,20 +262,9 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
- smp_store_mb((tsk)->state, (state_value)); \
+ smp_store_mb((tsk)->state, (state_value)); \
} while (0)
-/*
- * set_current_state() includes a barrier so that the write of current->state
- * is correctly serialised wrt the caller's subsequent test of whether to
- * actually sleep:
- *
- * set_current_state(TASK_UNINTERRUPTIBLE);
- * if (do_i_need_to_sleep())
- * schedule();
- *
- * If the caller does not need such serialisation then use __set_current_state()
- */
#define __set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
@@ -284,11 +273,19 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
- smp_store_mb(current->state, (state_value)); \
+ smp_store_mb(current->state, (state_value)); \
} while (0)
#else
+/*
+ * @tsk had better be current, or you get to keep the pieces.
+ *
+ * The only reason is that computing current can be more expensive than
+ * using a pointer that's already available.
+ *
+ * Therefore, see set_current_state().
+ */
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
@@ -299,11 +296,34 @@ extern char ___assert_task_state[1 - 2*!!(
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
+ * for (;;) {
* set_current_state(TASK_UNINTERRUPTIBLE);
- * if (do_i_need_to_sleep())
- * schedule();
+ * if (!need_sleep)
+ * break;
+ *
+ * schedule();
+ * }
+ * __set_current_state(TASK_RUNNING);
+ *
+ * If the caller does not need such serialisation (because, for instance, the
+ * condition test and condition change and wakeup are under the same lock) then
+ * use __set_current_state().
+ *
+ * The above is typically ordered against the wakeup, which does:
+ *
+ * need_sleep = false;
+ * wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ *
+ * Where wake_up_state() (and all other wakeup primitives) imply enough
+ * barriers to order the store of the variable against wakeup.
*
- * If the caller does not need such serialisation then use __set_current_state()
+ * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
+ * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
+ * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
+ *
+ * This is obviously fine, since they both store the exact same value.
+ *
+ * Also see the comments of try_to_wake_up().
*/
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
@@ -989,7 +1009,7 @@ enum cpu_idle_type {
* already in a wake queue, the wakeup will happen soon and the second
* waker can just skip it.
*
- * The WAKE_Q macro declares and initializes the list head.
+ * The DEFINE_WAKE_Q macro declares and initializes the list head.
* wake_up_q() does NOT reinitialize the list; it's expected to be
* called near the end of a function, where the fact that the queue is
* not used again will be easy to see by inspection.
@@ -1009,7 +1029,7 @@ struct wake_q_head {
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-#define WAKE_Q(name) \
+#define DEFINE_WAKE_Q(name) \
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head,
@@ -1057,6 +1077,8 @@ static inline int cpu_numa_flags(void)
}
#endif
+extern int arch_asym_cpu_priority(int cpu);
+
struct sched_domain_attr {
int relax_domain_level;
};
@@ -1627,7 +1649,10 @@ struct task_struct {
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- cputime_t utime, stime, utimescaled, stimescaled;
+ cputime_t utime, stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ cputime_t utimescaled, stimescaled;
+#endif
cputime_t gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -1791,6 +1816,9 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
+#ifdef CONFIG_INTEL_RDT_A
+ int closid;
+#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
@@ -2220,34 +2248,38 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime);
-extern void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
static inline void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime)
{
- if (utime)
- *utime = t->utime;
- if (stime)
- *stime = t->stime;
+ *utime = t->utime;
+ *stime = t->stime;
}
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+ return t->gtime;
+}
+#endif
+
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
static inline void task_cputime_scaled(struct task_struct *t,
cputime_t *utimescaled,
cputime_t *stimescaled)
{
- if (utimescaled)
- *utimescaled = t->utimescaled;
- if (stimescaled)
- *stimescaled = t->stimescaled;
+ *utimescaled = t->utimescaled;
+ *stimescaled = t->stimescaled;
}
-
-static inline cputime_t task_gtime(struct task_struct *t)
+#else
+static inline void task_cputime_scaled(struct task_struct *t,
+ cputime_t *utimescaled,
+ cputime_t *stimescaled)
{
- return t->gtime;
+ task_cputime(t, utimescaled, stimescaled);
}
#endif
+
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
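With this split, callers keep using task_cputime_scaled() unchanged; on architectures without CONFIG_ARCH_HAS_SCALED_CPUTIME it now simply falls back to the unscaled times. A small illustrative caller (scaled_cputime_sum() is a made-up helper, not part of the patch):

    static cputime_t scaled_cputime_sum(struct task_struct *t)
    {
        cputime_t utimescaled, stimescaled;

        /* falls back to task_cputime() when scaling is not supported */
        task_cputime_scaled(t, &utimescaled, &stimescaled);

        return utimescaled + stimescaled;
    }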
@@ -2445,6 +2477,10 @@ static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
+#ifndef cpu_relax_yield
+#define cpu_relax_yield() cpu_relax()
+#endif
+
/*
* Do not use outside of architecture code which knows its limitations.
*
@@ -3509,6 +3545,18 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
#endif /* CONFIG_SMP */
+/*
+ * In order to reduce various lock holder preemption latencies provide an
+ * interface to see if a vCPU is currently running or not.
+ *
+ * This allows us to terminate optimistic spin loops and block, analogous to
+ * the native optimistic spin heuristic of testing if the lock owner task is
+ * running or not.
+ */
+#ifndef vcpu_is_preempted
+# define vcpu_is_preempted(cpu) false
+#endif
+
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
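The new vcpu_is_preempted() hook (defaulting to false on architectures that do not override it) lets spin loops bail out when the lock holder's vCPU has been scheduled out, as the locking changes further down in this series use it. A minimal sketch of the pattern (spin_while_owner_runs() is an illustrative name):

    static bool spin_while_owner_runs(struct task_struct *owner, int owner_cpu)
    {
        while (READ_ONCE(owner->on_cpu)) {
            if (need_resched() || vcpu_is_preempted(owner_cpu))
                return false;   /* stop spinning and block instead */
            cpu_relax();
        }
        return true;            /* owner went off-CPU, likely released the lock */
    }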
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 22db1e63707e..441145351301 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -36,7 +36,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
diff --git a/include/linux/time.h b/include/linux/time.h
index 4cea09d94208..23f0f5ce3090 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -172,8 +172,6 @@ extern int do_setitimer(int which, struct itimerval *value,
struct itimerval *ovalue);
extern int do_getitimer(int which, struct itimerval *value);
-extern unsigned int alarm_setitimer(unsigned int seconds);
-
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
struct tms;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 09168c52ab64..361f8bf1429d 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -249,6 +249,7 @@ static inline u64 ktime_get_raw_ns(void)
extern u64 ktime_get_mono_fast_ns(void);
extern u64 ktime_get_raw_fast_ns(void);
+extern u64 ktime_get_boot_fast_ns(void);
/*
* Timespec interfaces utilizing the ktime based ones
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 2bb5deb0012e..7b0066814fa0 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -120,7 +120,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
{
ctx->task = current;
- ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
+ ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
ctx->ww_class = ww_class;
diff --git a/include/net/flow.h b/include/net/flow.h
index 6bbbca8af8e3..6984f1913dc1 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -246,6 +246,7 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
void *ctx);
int flow_cache_init(struct net *net);
void flow_cache_fini(struct net *net);
+void flow_cache_hp_init(void);
void flow_cache_flush(struct net *net);
void flow_cache_flush_deferred(struct net *net);
diff --git a/include/net/flowcache.h b/include/net/flowcache.h
index c8f665ec6e0d..9caf3bfc8d2d 100644
--- a/include/net/flowcache.h
+++ b/include/net/flowcache.h
@@ -17,7 +17,7 @@ struct flow_cache_percpu {
struct flow_cache {
u32 hash_shift;
struct flow_cache_percpu __percpu *percpu;
- struct notifier_block hotcpu_notifier;
+ struct hlist_node node;
int low_watermark;
int high_watermark;
struct timer_list rnd_timer;
diff --git a/include/trace/events/alarmtimer.h b/include/trace/events/alarmtimer.h
new file mode 100644
index 000000000000..a1c108c16c9c
--- /dev/null
+++ b/include/trace/events/alarmtimer.h
@@ -0,0 +1,96 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM alarmtimer
+
+#if !defined(_TRACE_ALARMTIMER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ALARMTIMER_H
+
+#include <linux/alarmtimer.h>
+#include <linux/rtc.h>
+#include <linux/tracepoint.h>
+
+TRACE_DEFINE_ENUM(ALARM_REALTIME);
+TRACE_DEFINE_ENUM(ALARM_BOOTTIME);
+TRACE_DEFINE_ENUM(ALARM_REALTIME_FREEZER);
+TRACE_DEFINE_ENUM(ALARM_BOOTTIME_FREEZER);
+
+#define show_alarm_type(type) __print_flags(type, " | ", \
+ { 1 << ALARM_REALTIME, "REALTIME" }, \
+ { 1 << ALARM_BOOTTIME, "BOOTTIME" }, \
+ { 1 << ALARM_REALTIME_FREEZER, "REALTIME Freezer" }, \
+ { 1 << ALARM_BOOTTIME_FREEZER, "BOOTTIME Freezer" })
+
+TRACE_EVENT(alarmtimer_suspend,
+
+ TP_PROTO(ktime_t expires, int flag),
+
+ TP_ARGS(expires, flag),
+
+ TP_STRUCT__entry(
+ __field(s64, expires)
+ __field(unsigned char, alarm_type)
+ ),
+
+ TP_fast_assign(
+ __entry->expires = expires.tv64;
+ __entry->alarm_type = flag;
+ ),
+
+ TP_printk("alarmtimer type:%s expires:%llu",
+ show_alarm_type((1 << __entry->alarm_type)),
+ __entry->expires
+ )
+);
+
+DECLARE_EVENT_CLASS(alarm_class,
+
+ TP_PROTO(struct alarm *alarm, ktime_t now),
+
+ TP_ARGS(alarm, now),
+
+ TP_STRUCT__entry(
+ __field(void *, alarm)
+ __field(unsigned char, alarm_type)
+ __field(s64, expires)
+ __field(s64, now)
+ ),
+
+ TP_fast_assign(
+ __entry->alarm = alarm;
+ __entry->alarm_type = alarm->type;
+ __entry->expires = alarm->node.expires.tv64;
+ __entry->now = now.tv64;
+ ),
+
+ TP_printk("alarmtimer:%p type:%s expires:%llu now:%llu",
+ __entry->alarm,
+ show_alarm_type((1 << __entry->alarm_type)),
+ __entry->expires,
+ __entry->now
+ )
+);
+
+DEFINE_EVENT(alarm_class, alarmtimer_fired,
+
+ TP_PROTO(struct alarm *alarm, ktime_t now),
+
+ TP_ARGS(alarm, now)
+);
+
+DEFINE_EVENT(alarm_class, alarmtimer_start,
+
+ TP_PROTO(struct alarm *alarm, ktime_t now),
+
+ TP_ARGS(alarm, now)
+);
+
+DEFINE_EVENT(alarm_class, alarmtimer_cancel,
+
+ TP_PROTO(struct alarm *alarm, ktime_t now),
+
+ TP_ARGS(alarm, now)
+);
+
+#endif /* _TRACE_ALARMTIMER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
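These events are consumed like any other tracepoint: one .c file in the subsystem instantiates them and the call sites use the generated trace_alarmtimer_*() helpers. A hedged sketch of the caller side (fire_alarm() is illustrative):

    /* in exactly one source file of the subsystem */
    #define CREATE_TRACE_POINTS
    #include <trace/events/alarmtimer.h>

    static void fire_alarm(struct alarm *alarm, ktime_t now)
    {
        trace_alarmtimer_fired(alarm, now); /* no-op unless the event is enabled */
        /* ... invoke the alarm handler ... */
    }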
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d3e756539d44..9d4f9b3a2b7b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -698,7 +698,10 @@ TRACE_EVENT(rcu_batch_end,
/*
* Tracepoint for rcutorture readers. The first argument is the name
* of the RCU flavor from rcutorture's viewpoint and the second argument
- * is the callback address.
+ * is the callback address. The third argument is the start time in
+ * seconds, and the last two arguments are the grace period numbers
+ * at the beginning and end of the read, respectively. Note that the
+ * callback address can be NULL.
*/
TRACE_EVENT(rcu_torture_read,
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 9bd559472c92..e230af2e6855 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -57,6 +57,7 @@
#define CGROUP_SUPER_MAGIC 0x27e0eb
#define CGROUP2_SUPER_MAGIC 0x63677270
+#define RDTGROUP_SUPER_MAGIC 0x7655821
#define STACK_END_MAGIC 0x57AC6E9D
diff --git a/init/Kconfig b/init/Kconfig
index b4f1fdce1231..aafafeb0c117 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1457,6 +1457,23 @@ config SYSCTL_SYSCALL
If unsure say N here.
+config POSIX_TIMERS
+ bool "Posix Clocks & timers" if EXPERT
+ default y
+ help
+ This adds native support for POSIX timers to the kernel.
+ Some embedded systems have no use for them and therefore they
+ can be configured out to reduce the size of the kernel image.
+
+ When this option is disabled, the following syscalls won't be
+ available: timer_create, timer_gettime, timer_getoverrun,
+ timer_settime, timer_delete, clock_adjtime, getitimer,
+ setitimer, alarm. Furthermore, the clock_settime, clock_gettime,
+ clock_getres and clock_nanosleep syscalls will be limited to
+ CLOCK_REALTIME, CLOCK_MONOTONIC and CLOCK_BOOTTIME only.
+
+ If unsure say y.
+
config KALLSYMS
bool "Load all symbols for debugging/ksymoops" if EXPERT
default y
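Code that depends on the now-optional timers guards itself either at compile time or at runtime with IS_ENABLED(), as the kernel/compat.c and kernel/exit.c hunks below do. A small sketch of the runtime-check flavour (setup_interval_timer() is an illustrative, hypothetical helper):

    static int setup_interval_timer(void)
    {
        if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
            return -ENOSYS;     /* itimers/posix timers compiled out */

        /* ... program the timer as before ... */
        return 0;
    }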
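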
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 8cbd6e6894d5..7a2d8f0c8ae5 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -967,7 +967,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
struct timespec ts;
struct posix_msg_tree_node *new_leaf = NULL;
int ret = 0;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
if (u_abs_timeout) {
int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -1151,7 +1151,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
msg_ptr = wait.msg;
}
} else {
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
msg_ptr = msg_get(info);
diff --git a/ipc/msg.c b/ipc/msg.c
index e12307d0c920..32e9bd837cde 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -235,7 +235,7 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
struct msg_msg *msg, *t;
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
expunge_all(msq, -EIDRM, &wake_q);
ss_wakeup(msq, &wake_q, true);
@@ -397,7 +397,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
goto out_up;
case IPC_SET:
{
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
!capable(CAP_SYS_RESOURCE)) {
@@ -634,7 +634,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
struct msg_msg *msg;
int err;
struct ipc_namespace *ns;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
ns = current->nsproxy->ipc_ns;
@@ -850,7 +850,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
struct msg_queue *msq;
struct ipc_namespace *ns;
struct msg_msg *msg, *copy = NULL;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
ns = current->nsproxy->ipc_ns;
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index ebdb0043203a..84d882f3e299 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -225,7 +225,7 @@ config ARCH_SUPPORTS_ATOMIC_RMW
config MUTEX_SPIN_ON_OWNER
def_bool y
- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+ depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW
config RWSEM_SPIN_ON_OWNER
def_bool y
diff --git a/kernel/compat.c b/kernel/compat.c
index 333d364be29d..b3a047f208a7 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -307,12 +307,17 @@ static inline long put_compat_itimerval(struct compat_itimerval __user *o,
__put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}
+asmlinkage long sys_ni_posix_timers(void);
+
COMPAT_SYSCALL_DEFINE2(getitimer, int, which,
struct compat_itimerval __user *, it)
{
struct itimerval kit;
int error;
+ if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
+ return sys_ni_posix_timers();
+
error = do_getitimer(which, &kit);
if (!error && put_compat_itimerval(it, &kit))
error = -EFAULT;
@@ -326,6 +331,9 @@ COMPAT_SYSCALL_DEFINE3(setitimer, int, which,
struct itimerval kin, kout;
int error;
+ if (!IS_ENABLED(CONFIG_POSIX_TIMERS))
+ return sys_ni_posix_timers();
+
if (in) {
if (get_compat_itimerval(&kin, in))
return -EFAULT;
diff --git a/kernel/exit.c b/kernel/exit.c
index 3076f3089919..aacff8e2aec0 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,6 +54,7 @@
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -91,11 +92,10 @@ static void __exit_signal(struct task_struct *tsk)
lockdep_tasklist_lock_is_held());
spin_lock(&sighand->siglock);
+#ifdef CONFIG_POSIX_TIMERS
posix_cpu_timers_exit(tsk);
if (group_dead) {
posix_cpu_timers_exit_group(tsk);
- tty = sig->tty;
- sig->tty = NULL;
} else {
/*
* This can only happen if the caller is de_thread().
@@ -104,7 +104,13 @@ static void __exit_signal(struct task_struct *tsk)
*/
if (unlikely(has_group_leader_pid(tsk)))
posix_cpu_timers_exit_group(tsk);
+ }
+#endif
+ if (group_dead) {
+ tty = sig->tty;
+ sig->tty = NULL;
+ } else {
/*
* If there is any task waiting for the group exit
* then notify it:
@@ -116,6 +122,9 @@ static void __exit_signal(struct task_struct *tsk)
sig->curr_target = next_thread(tsk);
}
+ add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
+ sizeof(unsigned long long));
+
/*
* Accumulate here the counters for all threads as they die. We could
* skip the group leader because it is the last user of signal_struct,
@@ -799,8 +808,10 @@ void __noreturn do_exit(long code)
acct_update_integrals(tsk);
group_dead = atomic_dec_and_test(&tsk->signal->live);
if (group_dead) {
+#ifdef CONFIG_POSIX_TIMERS
hrtimer_cancel(&tsk->signal->real_timer);
exit_itimers(tsk->signal);
+#endif
if (tsk->mm)
setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index a8eb821d1d4b..bdeebe8d55cd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1345,8 +1345,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
seqlock_init(&sig->stats_lock);
prev_cputime_init(&sig->prev_cputime);
+#ifdef CONFIG_POSIX_TIMERS
hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sig->real_timer.function = it_real_fn;
+#endif
task_lock(current->group_leader);
memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
@@ -1551,7 +1553,9 @@ static __latent_entropy struct task_struct *copy_process(
init_sigpending(&p->pending);
p->utime = p->stime = p->gtime = 0;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
p->utimescaled = p->stimescaled = 0;
+#endif
prev_cputime_init(&p->prev_cputime);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
diff --git a/kernel/futex.c b/kernel/futex.c
index 2c4be467fecd..9246d9f593d1 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1298,7 +1298,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
struct task_struct *new_owner;
struct futex_pi_state *pi_state = this->pi_state;
u32 uninitialized_var(curval), newval;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
bool deboost;
int ret = 0;
@@ -1415,7 +1415,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT;
int ret;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
if (!bitset)
return -EINVAL;
@@ -1469,7 +1469,7 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
int ret, op_ret;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
retry:
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
@@ -1708,7 +1708,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
struct futex_pi_state *pi_state = NULL;
struct futex_hash_bucket *hb1, *hb2;
struct futex_q *this, *next;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
if (requeue_pi) {
/*
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 17f51d63da56..9be9bda7c1f9 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -51,16 +51,17 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
/**
* irq_create_affinity_masks - Create affinity masks for multiqueue spreading
- * @affinity: The affinity mask to spread. If NULL cpu_online_mask
- * is used
- * @nvecs: The number of vectors
+ * @nvecs: The total number of vectors
+ * @affd: Description of the affinity requirements
*
* Returns the masks pointer or NULL if allocation failed.
*/
-struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
- int nvec)
+struct cpumask *
+irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
- int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0;
+ int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+ int affv = nvecs - affd->pre_vectors - affd->post_vectors;
+ int last_affv = affv + affd->pre_vectors;
nodemask_t nodemsk = NODE_MASK_NONE;
struct cpumask *masks;
cpumask_var_t nmsk;
@@ -68,46 +69,47 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
return NULL;
- masks = kzalloc(nvec * sizeof(*masks), GFP_KERNEL);
+ masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
if (!masks)
goto out;
+ /* Fill out vectors at the beginning that don't need affinity */
+ for (curvec = 0; curvec < affd->pre_vectors; curvec++)
+ cpumask_copy(masks + curvec, irq_default_affinity);
+
/* Stabilize the cpumasks */
get_online_cpus();
- /* If the supplied affinity mask is NULL, use cpu online mask */
- if (!affinity)
- affinity = cpu_online_mask;
-
- nodes = get_nodes_in_cpumask(affinity, &nodemsk);
+ nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
/*
* If the number of nodes in the mask is less than or equal the
* number of vectors we just spread the vectors across the nodes.
*/
- if (nvec <= nodes) {
+ if (affv <= nodes) {
for_each_node_mask(n, nodemsk) {
cpumask_copy(masks + curvec, cpumask_of_node(n));
- if (++curvec == nvec)
+ if (++curvec == last_affv)
break;
}
- goto outonl;
+ goto done;
}
/* Spread the vectors per node */
- vecs_per_node = nvec / nodes;
+ vecs_per_node = affv / nodes;
/* Account for rounding errors */
- extra_vecs = nvec - (nodes * vecs_per_node);
+ extra_vecs = affv - (nodes * vecs_per_node);
for_each_node_mask(n, nodemsk) {
int ncpus, v, vecs_to_assign = vecs_per_node;
/* Get the cpus on this node which are in the mask */
- cpumask_and(nmsk, affinity, cpumask_of_node(n));
+ cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
/* Calculate the number of cpus per vector */
ncpus = cpumask_weight(nmsk);
- for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) {
+ for (v = 0; curvec < last_affv && v < vecs_to_assign;
+ curvec++, v++) {
cpus_per_vec = ncpus / vecs_to_assign;
/* Account for extra vectors to compensate rounding errors */
@@ -119,36 +121,36 @@ struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
}
- if (curvec >= nvec)
+ if (curvec >= last_affv)
break;
}
-outonl:
+done:
put_online_cpus();
+
+ /* Fill out vectors at the end that don't need affinity */
+ for (; curvec < nvecs; curvec++)
+ cpumask_copy(masks + curvec, irq_default_affinity);
out:
free_cpumask_var(nmsk);
return masks;
}
/**
- * irq_calc_affinity_vectors - Calculate to optimal number of vectors for a given affinity mask
- * @affinity: The affinity mask to spread. If NULL cpu_online_mask
- * is used
- * @maxvec: The maximum number of vectors available
+ * irq_calc_affinity_vectors - Calculate the optimal number of vectors
+ * @maxvec: The maximum number of vectors available
+ * @affd: Description of the affinity requirements
*/
-int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
+int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
{
- int cpus, ret;
+ int resv = affd->pre_vectors + affd->post_vectors;
+ int vecs = maxvec - resv;
+ int cpus;
/* Stabilize the cpumasks */
get_online_cpus();
- /* If the supplied affinity mask is NULL, use cpu online mask */
- if (!affinity)
- affinity = cpu_online_mask;
-
- cpus = cpumask_weight(affinity);
- ret = (cpus < maxvec) ? cpus : maxvec;
-
+ cpus = cpumask_weight(cpu_online_mask);
put_online_cpus();
- return ret;
+
+ return min(cpus, vecs) + resv;
}
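Under the new interface a driver describes the vectors that must not be spread (for example an admin queue) via struct irq_affinity. A hedged sketch with made-up numbers (probe_vectors() is illustrative):

    static int probe_vectors(void)
    {
        static const struct irq_affinity affd = {
            .pre_vectors  = 1,  /* one reserved vector at the front */
            .post_vectors = 0,
        };
        struct cpumask *masks;
        int nvec;

        /* with 16 online CPUs and maxvec = 32: min(16, 32 - 1) + 1 = 17 */
        nvec = irq_calc_affinity_vectors(32, &affd);

        masks = irq_create_affinity_masks(nvec, &affd);
        if (!masks)
            return -ENOMEM;
        /* ... request the vectors, then free the masks ... */
        kfree(masks);
        return nvec;
    }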
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 8a3e872798f3..ee230063f033 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -14,9 +14,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
-
-/* Temparory solution for building, will be removed later */
-#include <linux/pci.h>
+#include <linux/slab.h>
/**
* alloc_msi_entry - Allocate an initialized msi_entry
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f8cff6874d04..7c38f8f3d97b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -840,9 +840,9 @@ static struct lock_list *alloc_list_entry(void)
/*
* Add a new dependency to the head of the list:
*/
-static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
- struct list_head *head, unsigned long ip,
- int distance, struct stack_trace *trace)
+static int add_lock_to_list(struct lock_class *this, struct list_head *head,
+ unsigned long ip, int distance,
+ struct stack_trace *trace)
{
struct lock_list *entry;
/*
@@ -1868,14 +1868,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
* Ok, all validations passed, add the new lock
* to the previous lock's dependency list:
*/
- ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+ ret = add_lock_to_list(hlock_class(next),
&hlock_class(prev)->locks_after,
next->acquire_ip, distance, &trace);
if (!ret)
return 0;
- ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+ ret = add_lock_to_list(hlock_class(prev),
&hlock_class(next)->locks_before,
next->acquire_ip, distance, &trace);
if (!ret)
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index c835270f0c2f..6a385aabcce7 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -28,7 +28,7 @@ struct mcs_spinlock {
#define arch_mcs_spin_lock_contended(l) \
do { \
while (!(smp_load_acquire(l))) \
- cpu_relax_lowlatency(); \
+ cpu_relax(); \
} while (0)
#endif
@@ -108,7 +108,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
return;
/* Wait until the next pointer is set */
while (!(next = READ_ONCE(node->next)))
- cpu_relax_lowlatency();
+ cpu_relax();
}
/* Pass lock to next waiter. */
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 9c951fade415..9aa713629387 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -73,21 +73,8 @@ void debug_mutex_unlock(struct mutex *lock)
{
if (likely(debug_locks)) {
DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-
- if (!lock->owner)
- DEBUG_LOCKS_WARN_ON(!lock->owner);
- else
- DEBUG_LOCKS_WARN_ON(lock->owner != current);
-
DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
}
-
- /*
- * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
- * mutexes so that we can do it here after we've verified state.
- */
- mutex_clear_owner(lock);
- atomic_set(&lock->count, 1);
}
void debug_mutex_init(struct mutex *lock, const char *name,
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 57a871ae3c81..a459faa48987 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -27,16 +27,6 @@ extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);
-static inline void mutex_set_owner(struct mutex *lock)
-{
- WRITE_ONCE(lock->owner, current);
-}
-
-static inline void mutex_clear_owner(struct mutex *lock)
-{
- WRITE_ONCE(lock->owner, NULL);
-}
-
#define spin_lock_mutex(lock, flags) \
do { \
struct mutex *l = container_of(lock, struct mutex, wait_lock); \
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90db3909..9b349619f431 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -27,41 +27,176 @@
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
-/*
- * In the DEBUG case we are using the "NULL fastpath" for mutexes,
- * which forces all calls into the slowpath:
- */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
-# include <asm-generic/mutex-null.h>
-/*
- * Must be 0 for the debug case so we do not do the unlock outside of the
- * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
- * case.
- */
-# undef __mutex_slowpath_needs_to_unlock
-# define __mutex_slowpath_needs_to_unlock() 0
#else
# include "mutex.h"
-# include <asm/mutex.h>
#endif
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
- atomic_set(&lock->count, 1);
+ atomic_long_set(&lock->owner, 0);
spin_lock_init(&lock->wait_lock);
INIT_LIST_HEAD(&lock->wait_list);
- mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
#endif
debug_mutex_init(lock, name, key);
}
-
EXPORT_SYMBOL(__mutex_init);
+/*
+ * @owner: contains the 'struct task_struct *' of the current lock owner;
+ * NULL means not owned. Since task_struct pointers are aligned at
+ * ARCH_MIN_TASKALIGN (which is at least sizeof(void *)), we have low
+ * bits to store extra state.
+ *
+ * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
+ * Bit1 indicates unlock needs to hand the lock to the top-waiter
+ */
+#define MUTEX_FLAG_WAITERS 0x01
+#define MUTEX_FLAG_HANDOFF 0x02
+
+#define MUTEX_FLAGS 0x03
+
+static inline struct task_struct *__owner_task(unsigned long owner)
+{
+ return (struct task_struct *)(owner & ~MUTEX_FLAGS);
+}
+
+static inline unsigned long __owner_flags(unsigned long owner)
+{
+ return owner & MUTEX_FLAGS;
+}
+
+/*
+ * Actual trylock that will work on any unlocked state.
+ *
+ * When setting the owner field, we must preserve the low flag bits.
+ *
+ * Be careful with @handoff, only set that in a wait-loop (where you set
+ * HANDOFF) to avoid recursive lock attempts.
+ */
+static inline bool __mutex_trylock(struct mutex *lock, const bool handoff)
+{
+ unsigned long owner, curr = (unsigned long)current;
+
+ owner = atomic_long_read(&lock->owner);
+ for (;;) { /* must loop, can race against a flag */
+ unsigned long old, flags = __owner_flags(owner);
+
+ if (__owner_task(owner)) {
+ if (handoff && unlikely(__owner_task(owner) == current)) {
+ /*
+ * Provide ACQUIRE semantics for the lock-handoff.
+ *
+ * We cannot easily use load-acquire here, since
+ * the actual load is a failed cmpxchg, which
+ * doesn't imply any barriers.
+ *
+ * Also, this is a fairly unlikely scenario, and
+ * this contains the cost.
+ */
+ smp_mb(); /* ACQUIRE */
+ return true;
+ }
+
+ return false;
+ }
+
+ /*
+ * When we set the HANDOFF bit, we must make sure it doesn't live
+ * past the point where we acquire it. This would be possible
+ * if we (accidentally) set the bit on an unlocked mutex.
+ */
+ if (handoff)
+ flags &= ~MUTEX_FLAG_HANDOFF;
+
+ old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
+ if (old == owner)
+ return true;
+
+ owner = old;
+ }
+}
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * Lockdep annotations are contained to the slow paths for simplicity.
+ * There is nothing that would stop spreading the lockdep annotations outwards
+ * except more code.
+ */
+
+/*
+ * Optimistic trylock that only works in the uncontended case. Make sure to
+ * follow with a __mutex_trylock() before failing.
+ */
+static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
+{
+ unsigned long curr = (unsigned long)current;
+
+ if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
+ return true;
+
+ return false;
+}
+
+static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
+{
+ unsigned long curr = (unsigned long)current;
+
+ if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
+ return true;
+
+ return false;
+}
+#endif
+
+static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
+{
+ atomic_long_or(flag, &lock->owner);
+}
+
+static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
+{
+ atomic_long_andnot(flag, &lock->owner);
+}
+
+static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
+{
+ return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
+}
+
+/*
+ * Give up ownership to a specific task; when @task = NULL, this is equivalent
+ * to a regular unlock. Clears HANDOFF, preserves WAITERS. Provides RELEASE
+ * semantics like a regular unlock; the __mutex_trylock() provides matching
+ * ACQUIRE semantics for the handoff.
+ */
+static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
+{
+ unsigned long owner = atomic_long_read(&lock->owner);
+
+ for (;;) {
+ unsigned long old, new;
+
+#ifdef CONFIG_DEBUG_MUTEXES
+ DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
+#endif
+
+ new = (owner & MUTEX_FLAG_WAITERS);
+ new |= (unsigned long)task;
+
+ old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
+ if (old == owner)
+ break;
+
+ owner = old;
+ }
+}
+
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
* We split the mutex lock/unlock logic into separate fastpath and
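The owner word introduced above packs the task pointer and two flag bits; because task_structs are at least pointer-aligned, the low bits of the pointer itself are always zero. A hedged worked example of the decoding (decode_owner_example() and the address are made up for illustration):

    static void decode_owner_example(struct mutex *lock)
    {
        unsigned long owner, flags;
        struct task_struct *t;

        /*
         * Suppose the owner task_struct lives at 0xffff880012345640 and both
         * flag bits are set: the owner word then reads 0xffff880012345643.
         */
        owner = atomic_long_read(&lock->owner);
        t     = __owner_task(owner);    /* 0xffff880012345640 */
        flags = __owner_flags(owner);   /* MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF */
    }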
@@ -69,7 +204,7 @@ EXPORT_SYMBOL(__mutex_init);
* We also put the fastpath first in the kernel image, to make sure the
* branch is predicted by the CPU as default-untaken.
*/
-__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
+static void __sched __mutex_lock_slowpath(struct mutex *lock);
/**
* mutex_lock - acquire the mutex
@@ -95,14 +230,10 @@ __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
void __sched mutex_lock(struct mutex *lock)
{
might_sleep();
- /*
- * The locking fastpath is the 1->0 transition from
- * 'unlocked' into 'locked' state.
- */
- __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
- mutex_set_owner(lock);
-}
+ if (!__mutex_trylock_fast(lock))
+ __mutex_lock_slowpath(lock);
+}
EXPORT_SYMBOL(mutex_lock);
#endif
@@ -149,9 +280,6 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
/*
* After acquiring lock with fastpath or when we lost out in contested
* slowpath, set ctx and wake up any waiters so they can recheck.
- *
- * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
- * as the fastpath and opportunistic spinning are disabled in that case.
*/
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
@@ -176,7 +304,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
/*
* Check if lock is contended, if not there is nobody to wake up
*/
- if (likely(atomic_read(&lock->base.count) == 0))
+ if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
return;
/*
@@ -227,7 +355,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
bool ret = true;
rcu_read_lock();
- while (lock->owner == owner) {
+ while (__mutex_owner(lock) == owner) {
/*
* Ensure we emit the owner->on_cpu, dereference _after_
* checking lock->owner still matches owner. If that fails,
@@ -236,12 +364,16 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
*/
barrier();
- if (!owner->on_cpu || need_resched()) {
+ /*
+ * Use vcpu_is_preempted() to detect lock holder preemption.
+ */
+ if (!owner->on_cpu || need_resched() ||
+ vcpu_is_preempted(task_cpu(owner))) {
ret = false;
break;
}
- cpu_relax_lowlatency();
+ cpu_relax();
}
rcu_read_unlock();
@@ -260,27 +392,25 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
return 0;
rcu_read_lock();
- owner = READ_ONCE(lock->owner);
+ owner = __mutex_owner(lock);
+
+ /*
+ * Due to the lock holder preemption issue, we skip spinning if the task
+ * is not on a CPU or its CPU is preempted.
+ */
if (owner)
- retval = owner->on_cpu;
+ retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
rcu_read_unlock();
+
/*
- * if lock->owner is not set, the mutex owner may have just acquired
- * it and not set the owner yet or the mutex has been released.
+ * If lock->owner is not set, the mutex has been released. Return true
+ * such that we'll trylock in the spin path, which is a faster option
+ * than the blocking slow path.
*/
return retval;
}
/*
- * Atomically try to take the lock when it is available
- */
-static inline bool mutex_try_to_acquire(struct mutex *lock)
-{
- return !mutex_is_locked(lock) &&
- (atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
-}
-
-/*
* Optimistic spinning.
*
* We try to spin for acquisition when we find that the lock owner
@@ -288,13 +418,6 @@ static inline bool mutex_try_to_acquire(struct mutex *lock)
* need to reschedule. The rationale is that if the lock owner is
* running, it is likely to release the lock soon.
*
- * Since this needs the lock owner, and this mutex implementation
- * doesn't track the owner atomically in the lock field, we need to
- * track it non-atomically.
- *
- * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
- * to serialize everything.
- *
* The mutex spinners are queued up using MCS lock so that only one
* spinner can compete for the mutex. However, if mutex spinning isn't
* going to happen, there is no point in going through the lock/unlock
@@ -302,24 +425,39 @@ static inline bool mutex_try_to_acquire(struct mutex *lock)
*
* Returns true when the lock was taken, otherwise false, indicating
* that we need to jump to the slowpath and sleep.
+ *
+ * The waiter flag is set to true if the spinner is a waiter in the wait
+ * queue. The waiter-spinner will spin on the lock directly and concurrently
+ * with the spinner at the head of the OSQ, if present, until the owner is
+ * changed to itself.
*/
static bool mutex_optimistic_spin(struct mutex *lock,
- struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+ struct ww_acquire_ctx *ww_ctx,
+ const bool use_ww_ctx, const bool waiter)
{
struct task_struct *task = current;
- if (!mutex_can_spin_on_owner(lock))
- goto done;
+ if (!waiter) {
+ /*
+ * The purpose of the mutex_can_spin_on_owner() function is
+ * to eliminate the overhead of osq_lock() and osq_unlock()
+ * in case spinning isn't possible. As a waiter-spinner
+ * is not going to take OSQ lock anyway, there is no need
+ * to call mutex_can_spin_on_owner().
+ */
+ if (!mutex_can_spin_on_owner(lock))
+ goto fail;
- /*
- * In order to avoid a stampede of mutex spinners trying to
- * acquire the mutex all at once, the spinners need to take a
- * MCS (queued) lock first before spinning on the owner field.
- */
- if (!osq_lock(&lock->osq))
- goto done;
+ /*
+ * In order to avoid a stampede of mutex spinners trying to
+ * acquire the mutex all at once, the spinners need to take a
+ * MCS (queued) lock first before spinning on the owner field.
+ */
+ if (!osq_lock(&lock->osq))
+ goto fail;
+ }
- while (true) {
+ for (;;) {
struct task_struct *owner;
if (use_ww_ctx && ww_ctx->acquired > 0) {
@@ -335,40 +473,26 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* performed the optimistic spinning cannot be done.
*/
if (READ_ONCE(ww->ctx))
- break;
+ goto fail_unlock;
}
/*
* If there's an owner, wait for it to either
* release the lock or go to sleep.
*/
- owner = READ_ONCE(lock->owner);
- if (owner && !mutex_spin_on_owner(lock, owner))
- break;
-
- /* Try to acquire the mutex if it is unlocked. */
- if (mutex_try_to_acquire(lock)) {
- lock_acquired(&lock->dep_map, ip);
-
- if (use_ww_ctx) {
- struct ww_mutex *ww;
- ww = container_of(lock, struct ww_mutex, base);
-
- ww_mutex_set_context_fastpath(ww, ww_ctx);
+ owner = __mutex_owner(lock);
+ if (owner) {
+ if (waiter && owner == task) {
+ smp_mb(); /* ACQUIRE */
+ break;
}
- mutex_set_owner(lock);
- osq_unlock(&lock->osq);
- return true;
+ if (!mutex_spin_on_owner(lock, owner))
+ goto fail_unlock;
}
- /*
- * When there's no owner, we might have preempted between the
- * owner acquiring the lock and setting the owner field. If
- * we're an RT task that will live-lock because we won't let
- * the owner complete.
- */
- if (!owner && (need_resched() || rt_task(task)))
+ /* Try to acquire the mutex if it is unlocked. */
+ if (__mutex_trylock(lock, waiter))
break;
/*
@@ -377,11 +501,20 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* memory barriers as we'll eventually observe the right
* values at the cost of a few extra spins.
*/
- cpu_relax_lowlatency();
+ cpu_relax();
}
- osq_unlock(&lock->osq);
-done:
+ if (!waiter)
+ osq_unlock(&lock->osq);
+
+ return true;
+
+
+fail_unlock:
+ if (!waiter)
+ osq_unlock(&lock->osq);
+
+fail:
/*
* If we fell out of the spin path because of need_resched(),
* reschedule now, before we try-lock the mutex. This avoids getting
@@ -400,14 +533,14 @@ done:
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
- struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+ struct ww_acquire_ctx *ww_ctx,
+ const bool use_ww_ctx, const bool waiter)
{
return false;
}
#endif
-__visible __used noinline
-void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
/**
* mutex_unlock - release the mutex
@@ -422,21 +555,12 @@ void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
*/
void __sched mutex_unlock(struct mutex *lock)
{
- /*
- * The unlocking fastpath is the 0->1 transition from 'locked'
- * into 'unlocked' state:
- */
-#ifndef CONFIG_DEBUG_MUTEXES
- /*
- * When debugging is enabled we must not clear the owner before time,
- * the slow path will always be taken, and that clears the owner field
- * after verifying that it was indeed current.
- */
- mutex_clear_owner(lock);
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+ if (__mutex_unlock_fast(lock))
+ return;
#endif
- __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
+ __mutex_unlock_slowpath(lock, _RET_IP_);
}
-
EXPORT_SYMBOL(mutex_unlock);
/**
@@ -465,15 +589,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
lock->ctx = NULL;
}
-#ifndef CONFIG_DEBUG_MUTEXES
- /*
- * When debugging is enabled we must not clear the owner before time,
- * the slow path will always be taken, and that clears the owner field
- * after verifying that it was indeed current.
- */
- mutex_clear_owner(&lock->base);
-#endif
- __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
+ mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
@@ -509,10 +625,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct task_struct *task = current;
struct mutex_waiter waiter;
unsigned long flags;
+ bool first = false;
+ struct ww_mutex *ww;
int ret;
if (use_ww_ctx) {
- struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+ ww = container_of(lock, struct ww_mutex, base);
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
return -EALREADY;
}
@@ -520,20 +638,21 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
preempt_disable();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
- if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
+ if (__mutex_trylock(lock, false) ||
+ mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
/* got the lock, yay! */
+ lock_acquired(&lock->dep_map, ip);
+ if (use_ww_ctx)
+ ww_mutex_set_context_fastpath(ww, ww_ctx);
preempt_enable();
return 0;
}
spin_lock_mutex(&lock->wait_lock, flags);
-
/*
- * Once more, try to acquire the lock. Only try-lock the mutex if
- * it is unlocked to reduce unnecessary xchg() operations.
+ * After waiting to acquire the wait_lock, try again.
*/
- if (!mutex_is_locked(lock) &&
- (atomic_xchg_acquire(&lock->count, 0) == 1))
+ if (__mutex_trylock(lock, false))
goto skip_wait;
debug_mutex_lock_common(lock, &waiter);
@@ -543,26 +662,26 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
list_add_tail(&waiter.list, &lock->wait_list);
waiter.task = task;
+ if (__mutex_waiter_is_first(lock, &waiter))
+ __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
+
lock_contended(&lock->dep_map, ip);
+ set_task_state(task, state);
for (;;) {
/*
- * Lets try to take the lock again - this is needed even if
- * we get here for the first time (shortly after failing to
- * acquire the lock), to make sure that we get a wakeup once
- * it's unlocked. Later on, if we sleep, this is the
- * operation that gives us the lock. We xchg it to -1, so
- * that when we release the lock, we properly wake up the
- * other waiters. We only attempt the xchg if the count is
- * non-negative in order to avoid unnecessary xchg operations:
+ * Once we hold wait_lock, we're serialized against
+ * mutex_unlock() handing the lock off to us, do a trylock
+ * before testing the error conditions to make sure we pick up
+ * the handoff.
*/
- if (atomic_read(&lock->count) >= 0 &&
- (atomic_xchg_acquire(&lock->count, -1) == 1))
- break;
+ if (__mutex_trylock(lock, first))
+ goto acquired;
/*
- * got a signal? (This code gets eliminated in the
- * TASK_UNINTERRUPTIBLE case.)
+ * Check for signals and wound conditions while holding
+ * wait_lock. This ensures the lock cancellation is ordered
+ * against mutex_unlock() and wake-ups do not go missing.
*/
if (unlikely(signal_pending_state(state, task))) {
ret = -EINTR;
@@ -575,36 +694,49 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto err;
}
- __set_task_state(task, state);
-
- /* didn't get the lock, go to sleep: */
spin_unlock_mutex(&lock->wait_lock, flags);
schedule_preempt_disabled();
+
+ if (!first && __mutex_waiter_is_first(lock, &waiter)) {
+ first = true;
+ __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
+ }
+
+ set_task_state(task, state);
+ /*
+ * Here we order against unlock; we must either see it change
+ * state back to RUNNING and fall through the next schedule(),
+ * or we must see its unlock and acquire.
+ */
+ if ((first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) ||
+ __mutex_trylock(lock, first))
+ break;
+
spin_lock_mutex(&lock->wait_lock, flags);
}
+ spin_lock_mutex(&lock->wait_lock, flags);
+acquired:
__set_task_state(task, TASK_RUNNING);
mutex_remove_waiter(lock, &waiter, task);
- /* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
- atomic_set(&lock->count, 0);
+ __mutex_clear_flag(lock, MUTEX_FLAGS);
+
debug_mutex_free_waiter(&waiter);
skip_wait:
/* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
- mutex_set_owner(lock);
- if (use_ww_ctx) {
- struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+ if (use_ww_ctx)
ww_mutex_set_context_slowpath(ww, ww_ctx);
- }
spin_unlock_mutex(&lock->wait_lock, flags);
preempt_enable();
return 0;
err:
+ __set_task_state(task, TASK_RUNNING);
mutex_remove_waiter(lock, &waiter, task);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
@@ -631,7 +763,6 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
0, nest, _RET_IP_, NULL, 0);
}
-
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
int __sched
@@ -650,7 +781,6 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
subclass, NULL, _RET_IP_, NULL, 0);
}
-
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
static inline int
@@ -715,54 +845,64 @@ EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
/*
* Release the lock, slowpath:
*/
-static inline void
-__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
+static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
- unsigned long flags;
- WAKE_Q(wake_q);
+ struct task_struct *next = NULL;
+ unsigned long owner, flags;
+ DEFINE_WAKE_Q(wake_q);
+
+ mutex_release(&lock->dep_map, 1, ip);
/*
- * As a performance measurement, release the lock before doing other
- * wakeup related duties to follow. This allows other tasks to acquire
- * the lock sooner, while still handling cleanups in past unlock calls.
- * This can be done as we do not enforce strict equivalence between the
- * mutex counter and wait_list.
- *
+ * Release the lock before (potentially) taking the spinlock such that
+ * other contenders can get on with things ASAP.
*
- * Some architectures leave the lock unlocked in the fastpath failure
- * case, others need to leave it locked. In the later case we have to
- * unlock it here - as the lock counter is currently 0 or negative.
+ * Except when HANDOFF, in that case we must not clear the owner field,
+ * but instead set it to the top waiter.
*/
- if (__mutex_slowpath_needs_to_unlock())
- atomic_set(&lock->count, 1);
+ owner = atomic_long_read(&lock->owner);
+ for (;;) {
+ unsigned long old;
+
+#ifdef CONFIG_DEBUG_MUTEXES
+ DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
+#endif
+
+ if (owner & MUTEX_FLAG_HANDOFF)
+ break;
+
+ old = atomic_long_cmpxchg_release(&lock->owner, owner,
+ __owner_flags(owner));
+ if (old == owner) {
+ if (owner & MUTEX_FLAG_WAITERS)
+ break;
+
+ return;
+ }
+
+ owner = old;
+ }
spin_lock_mutex(&lock->wait_lock, flags);
- mutex_release(&lock->dep_map, nested, _RET_IP_);
debug_mutex_unlock(lock);
-
if (!list_empty(&lock->wait_list)) {
/* get the first entry from the wait-list: */
struct mutex_waiter *waiter =
- list_entry(lock->wait_list.next,
- struct mutex_waiter, list);
+ list_first_entry(&lock->wait_list,
+ struct mutex_waiter, list);
+
+ next = waiter->task;
debug_mutex_wake_waiter(lock, waiter);
- wake_q_add(&wake_q, waiter->task);
+ wake_q_add(&wake_q, next);
}
- spin_unlock_mutex(&lock->wait_lock, flags);
- wake_up_q(&wake_q);
-}
+ if (owner & MUTEX_FLAG_HANDOFF)
+ __mutex_handoff(lock, next);
-/*
- * Release the lock, slowpath:
- */
-__visible void
-__mutex_unlock_slowpath(atomic_t *lock_count)
-{
- struct mutex *lock = container_of(lock_count, struct mutex, count);
+ spin_unlock_mutex(&lock->wait_lock, flags);
- __mutex_unlock_common_slowpath(lock, 1);
+ wake_up_q(&wake_q);
}
#ifndef CONFIG_DEBUG_LOCK_ALLOC
@@ -789,38 +929,30 @@ __mutex_lock_interruptible_slowpath(struct mutex *lock);
*/
int __sched mutex_lock_interruptible(struct mutex *lock)
{
- int ret;
-
might_sleep();
- ret = __mutex_fastpath_lock_retval(&lock->count);
- if (likely(!ret)) {
- mutex_set_owner(lock);
+
+ if (__mutex_trylock_fast(lock))
return 0;
- } else
- return __mutex_lock_interruptible_slowpath(lock);
+
+ return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);
int __sched mutex_lock_killable(struct mutex *lock)
{
- int ret;
-
might_sleep();
- ret = __mutex_fastpath_lock_retval(&lock->count);
- if (likely(!ret)) {
- mutex_set_owner(lock);
+
+ if (__mutex_trylock_fast(lock))
return 0;
- } else
- return __mutex_lock_killable_slowpath(lock);
+
+ return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);
-__visible void __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
+static noinline void __sched
+__mutex_lock_slowpath(struct mutex *lock)
{
- struct mutex *lock = container_of(lock_count, struct mutex, count);
-
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
NULL, _RET_IP_, NULL, 0);
}
@@ -856,37 +988,6 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
#endif
-/*
- * Spinlock based trylock, we take the spinlock and check whether we
- * can get the lock:
- */
-static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
-{
- struct mutex *lock = container_of(lock_count, struct mutex, count);
- unsigned long flags;
- int prev;
-
- /* No need to trylock if the mutex is locked. */
- if (mutex_is_locked(lock))
- return 0;
-
- spin_lock_mutex(&lock->wait_lock, flags);
-
- prev = atomic_xchg_acquire(&lock->count, -1);
- if (likely(prev == 1)) {
- mutex_set_owner(lock);
- mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- }
-
- /* Set it back to 0 if there are no waiters: */
- if (likely(list_empty(&lock->wait_list)))
- atomic_set(&lock->count, 0);
-
- spin_unlock_mutex(&lock->wait_lock, flags);
-
- return prev == 1;
-}
-
/**
* mutex_trylock - try to acquire the mutex, without waiting
* @lock: the mutex to be acquired
@@ -903,13 +1004,12 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
*/
int __sched mutex_trylock(struct mutex *lock)
{
- int ret;
+ bool locked = __mutex_trylock(lock, false);
- ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
- if (ret)
- mutex_set_owner(lock);
+ if (locked)
+ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- return ret;
+ return locked;
}
EXPORT_SYMBOL(mutex_trylock);
@@ -917,36 +1017,28 @@ EXPORT_SYMBOL(mutex_trylock);
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
- int ret;
-
might_sleep();
- ret = __mutex_fastpath_lock_retval(&lock->base.count);
-
- if (likely(!ret)) {
+ if (__mutex_trylock_fast(&lock->base)) {
ww_mutex_set_context_fastpath(lock, ctx);
- mutex_set_owner(&lock->base);
- } else
- ret = __ww_mutex_lock_slowpath(lock, ctx);
- return ret;
+ return 0;
+ }
+
+ return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock);
int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
- int ret;
-
might_sleep();
- ret = __mutex_fastpath_lock_retval(&lock->base.count);
-
- if (likely(!ret)) {
+ if (__mutex_trylock_fast(&lock->base)) {
ww_mutex_set_context_fastpath(lock, ctx);
- mutex_set_owner(&lock->base);
- } else
- ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
- return ret;
+ return 0;
+ }
+
+ return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 6cd6b8e9efd7..4410a4af42a3 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -16,32 +16,6 @@
#define mutex_remove_waiter(lock, waiter, task) \
__list_del((waiter)->list.prev, (waiter)->list.next)
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-/*
- * The mutex owner can get read and written to locklessly.
- * We should use WRITE_ONCE when writing the owner value to
- * avoid store tearing, otherwise, a thread could potentially
- * read a partially written and incomplete owner value.
- */
-static inline void mutex_set_owner(struct mutex *lock)
-{
- WRITE_ONCE(lock->owner, current);
-}
-
-static inline void mutex_clear_owner(struct mutex *lock)
-{
- WRITE_ONCE(lock->owner, NULL);
-}
-#else
-static inline void mutex_set_owner(struct mutex *lock)
-{
-}
-
-static inline void mutex_clear_owner(struct mutex *lock)
-{
-}
-#endif
-
#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
#define debug_mutex_free_waiter(waiter) do { } while (0)
#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 05a37857ab55..a3167941093b 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -21,6 +21,11 @@ static inline int encode_cpu(int cpu_nr)
return cpu_nr + 1;
}
+static inline int node_cpu(struct optimistic_spin_node *node)
+{
+ return node->cpu - 1;
+}
+
static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
int cpu_nr = encoded_cpu_val - 1;
@@ -75,7 +80,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
break;
}
- cpu_relax_lowlatency();
+ cpu_relax();
}
return next;
@@ -118,11 +123,13 @@ bool osq_lock(struct optimistic_spin_queue *lock)
while (!READ_ONCE(node->locked)) {
/*
* If we need to reschedule bail... so we can block.
+ * Use vcpu_is_preempted() to avoid waiting for a preempted
+ * lock holder:
*/
- if (need_resched())
+ if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
goto unqueue;
- cpu_relax_lowlatency();
+ cpu_relax();
}
return true;
@@ -148,7 +155,7 @@ unqueue:
if (smp_load_acquire(&node->locked))
return true;
- cpu_relax_lowlatency();
+ cpu_relax();
/*
* Or we race against a concurrent unqueue()'s step-B, in which
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 19248ddf37ce..cc3ed0ccdfa2 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -54,7 +54,7 @@ static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
while ((cnts & _QW_WMASK) == _QW_LOCKED) {
- cpu_relax_lowlatency();
+ cpu_relax();
cnts = atomic_read_acquire(&lock->cnts);
}
}
@@ -130,7 +130,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
(cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
break;
- cpu_relax_lowlatency();
+ cpu_relax();
}
/* When no more readers, set the locked flag */
@@ -141,7 +141,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
_QW_LOCKED) == _QW_WAITING))
break;
- cpu_relax_lowlatency();
+ cpu_relax();
}
unlock:
arch_spin_unlock(&lock->wait_lock);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2c49d76f96c3..2f443ed2320a 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1446,7 +1446,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
struct wake_q_head *wqh))
{
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
rt_mutex_deadlock_account_unlock(current);
@@ -1619,11 +1619,15 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
* proxy owner
*
- * @lock: the rt_mutex to be locked
+ * @lock: the rt_mutex to be locked
* @proxy_owner:the task to set as owner
*
* No locking. Caller has to do serializing itself
- * Special API call for PI-futex support
+ *
+ * Special API call for PI-futex support. This initializes the rtmutex and
+ * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
+ * possible at this point because the pi_state which contains the rtmutex
+ * is not yet visible to other tasks.
*/
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
@@ -1637,10 +1641,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
/**
* rt_mutex_proxy_unlock - release a lock on behalf of owner
*
- * @lock: the rt_mutex to be locked
+ * @lock: the rt_mutex to be locked
*
* No locking. Caller has to do serializing itself
- * Special API call for PI-futex support
+ *
+ * Special API call for PI-futex support. This merrily cleans up the rtmutex
+ * (debugging) state. Concurrent operations on this rt_mutex are not
+ * possible because it belongs to the pi_state which is about to be freed
+ * and it is no longer visible to other tasks.
*/
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner)
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index e317e1cbb3eb..990134617b4c 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -71,13 +71,12 @@ task_top_pi_waiter(struct task_struct *p)
* lock->owner state tracking:
*/
#define RT_MUTEX_HAS_WAITERS 1UL
-#define RT_MUTEX_OWNER_MASKALL 1UL
static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
- return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
+ return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
/*
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 2337b4bb2366..631506004f9e 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -225,7 +225,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
struct rwsem_waiter waiter;
struct task_struct *tsk = current;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
waiter.task = tsk;
waiter.type = RWSEM_WAITING_FOR_READ;
@@ -336,7 +336,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
goto done;
}
- ret = owner->on_cpu;
+ /*
+ * Due to the lock holder preemption issue, we skip spinning if the task
+ * is not on a CPU or its CPU is preempted.
+ */
+ ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
done:
rcu_read_unlock();
return ret;
@@ -362,13 +366,17 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
*/
barrier();
- /* abort spinning when need_resched or owner is not running */
- if (!owner->on_cpu || need_resched()) {
+ /*
+ * abort spinning when need_resched or owner is not running or
+ * owner's cpu is preempted.
+ */
+ if (!owner->on_cpu || need_resched() ||
+ vcpu_is_preempted(task_cpu(owner))) {
rcu_read_unlock();
return false;
}
- cpu_relax_lowlatency();
+ cpu_relax();
}
rcu_read_unlock();
out:
@@ -423,7 +431,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
* memory barriers as we'll eventually observe the right
* values at the cost of a few extra spins.
*/
- cpu_relax_lowlatency();
+ cpu_relax();
}
osq_unlock(&sem->osq);
done:
@@ -461,7 +469,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
bool waiting = true; /* any queued threads before us */
struct rwsem_waiter waiter;
struct rw_semaphore *ret = sem;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
/* undo write bias from down_write operation, stop active locking */
count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
@@ -495,7 +503,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
* wake any read locks that were queued ahead of us.
*/
if (count > RWSEM_WAITING_BIAS) {
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
/*
@@ -571,7 +579,7 @@ __visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
/*
* If a spinner is present, it is not necessary to do the wakeup.
@@ -625,7 +633,7 @@ __visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
raw_spin_lock_irqsave(&sem->wait_lock, flags);
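All of the WAKE_Q() sites above become DEFINE_WAKE_Q(), matching the usual DEFINE_* naming for on-stack initializers; behaviour is unchanged. A hedged kernel-style sketch of the wake-queue pattern these call sites follow (the function and variable names are made up, and in a 4.9-era tree the wake_q declarations live in linux/sched.h):

#include <linux/sched.h>

static void wake_one_waiter(struct task_struct *waiter)
{
	DEFINE_WAKE_Q(wake_q);		/* on-stack head, formerly WAKE_Q() */

	/* Queue the task while still holding whatever lock protects the wait list. */
	wake_q_add(&wake_q, waiter);

	/* ... drop the lock here ... */

	/* Issue the actual wakeup outside the lock. */
	wake_up_q(&wake_q);
}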
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index f7a55e9ff2f7..577f2288d19f 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2166,27 +2166,20 @@ void resume_console(void)
/**
* console_cpu_notify - print deferred console messages after CPU hotplug
- * @self: notifier struct
- * @action: CPU hotplug event
- * @hcpu: unused
+ * @cpu: unused
*
* If printk() is called from a CPU that is not online yet, the messages
* will be spooled but will not show up on the console. This function is
* called when a new CPU comes online (or fails to come up), and ensures
* that any such output gets printed.
*/
-static int console_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
-{
- switch (action) {
- case CPU_ONLINE:
- case CPU_DEAD:
- case CPU_DOWN_FAILED:
- case CPU_UP_CANCELED:
+static int console_cpu_notify(unsigned int cpu)
+{
+ if (!cpuhp_tasks_frozen) {
console_lock();
console_unlock();
}
- return NOTIFY_OK;
+ return 0;
}
/**
@@ -2824,6 +2817,7 @@ EXPORT_SYMBOL(unregister_console);
static int __init printk_late_init(void)
{
struct console *con;
+ int ret;
for_each_console(con) {
if (!keep_bootcon && con->flags & CON_BOOT) {
@@ -2838,7 +2832,12 @@ static int __init printk_late_init(void)
unregister_console(con);
}
}
- hotcpu_notifier(console_cpu_notify, 0);
+ ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
+ console_cpu_notify);
+ WARN_ON(ret < 0);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
+ console_cpu_notify, NULL);
+ WARN_ON(ret < 0);
return 0;
}
late_initcall(printk_late_init);
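printk's hotcpu_notifier() is converted to the cpuhp state machine: one state for the dead path and one dynamic online state, both using the same callback. A hedged sketch of how a dynamic hotplug callback is typically registered with this API (the names are illustrative and error handling is reduced to the bare minimum):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int example_cpu_online(unsigned int cpu)
{
	/* Runs when @cpu comes online. */
	return 0;
}

static int example_cpu_dead(unsigned int cpu)
{
	/* Runs after @cpu has gone down. */
	return 0;
}

static int __init example_init(void)
{
	int ret;

	/* Dynamic state; the _nocalls variant does not replay already-online CPUs. */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "example:online",
					example_cpu_online, example_cpu_dead);
	return ret < 0 ? ret : 0;
}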
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index bf08fee53dc7..87c51225ceec 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -289,15 +289,24 @@ static int rcu_torture_read_lock(void) __acquires(RCU)
static void rcu_read_delay(struct torture_random_state *rrsp)
{
+ unsigned long started;
+ unsigned long completed;
const unsigned long shortdelay_us = 200;
const unsigned long longdelay_ms = 50;
+ unsigned long long ts;
/* We want a short delay sometimes to make a reader delay the grace
* period, and we want a long delay occasionally to trigger
* force_quiescent_state. */
- if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
+ if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
+ started = cur_ops->completed();
+ ts = rcu_trace_clock_local();
mdelay(longdelay_ms);
+ completed = cur_ops->completed();
+ do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
+ started, completed);
+ }
if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 69a5611a7e7c..96c52e43f7ca 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1304,7 +1304,8 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
if (!rcu_kick_kthreads)
return;
j = READ_ONCE(rsp->jiffies_kick_kthreads);
- if (time_after(jiffies, j) && rsp->gp_kthread) {
+ if (time_after(jiffies, j) && rsp->gp_kthread &&
+ (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
rcu_ftrace_dump(DUMP_ALL);
wake_up_process(rsp->gp_kthread);
@@ -2828,8 +2829,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
* Also schedule RCU core processing.
*
* This function must be called from hardirq context. It is normally
- * invoked from the scheduling-clock interrupt. If rcu_pending returns
- * false, there is no point in invoking rcu_check_callbacks().
+ * invoked from the scheduling-clock interrupt.
*/
void rcu_check_callbacks(int user)
{
@@ -3121,7 +3121,9 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
unsigned long flags;
struct rcu_data *rdp;
- WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
+ /* Misaligned rcu_head! */
+ WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
+
if (debug_rcu_head_queue(head)) {
/* Probable double call_rcu(), so leak the callback. */
WRITE_ONCE(head->func, rcu_leak_callback);
@@ -3130,13 +3132,6 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
}
head->func = func;
head->next = NULL;
-
- /*
- * Opportunistically note grace-period endings and beginnings.
- * Note that we might see a beginning right after we see an
- * end, but never vice versa, since this CPU has to pass through
- * a quiescent state betweentimes.
- */
local_irq_save(flags);
rdp = this_cpu_ptr(rsp->rda);
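The misalignment check in __call_rcu() is widened from testing bit 0 to testing all low pointer bits, so any rcu_head that is not pointer-aligned now trips the warning. A standalone sketch of the same test (the function name is illustrative):

#include <stdio.h>
#include <stdint.h>

/* True if @p is aligned to at least a pointer boundary. */
static int pointer_aligned(const void *p)
{
	return ((uintptr_t)p & (sizeof(void *) - 1)) == 0;
}

int main(void)
{
	void *x = NULL;

	/* &x is suitably aligned; the byte after it is not. */
	printf("%d %d\n", pointer_aligned(&x), pointer_aligned((char *)&x + 1));
	return 0;
}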
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index e99a5234d9ed..fe98dd24adf8 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -404,6 +404,7 @@ struct rcu_data {
atomic_long_t exp_workdone1; /* # done by others #1. */
atomic_long_t exp_workdone2; /* # done by others #2. */
atomic_long_t exp_workdone3; /* # done by others #3. */
+ int exp_dynticks_snap; /* Double-check need for IPI. */
/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 24343eb87b58..d3053e99fdb6 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -358,8 +358,10 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+ rdp->exp_dynticks_snap =
+ atomic_add_return(0, &rdtp->dynticks);
if (raw_smp_processor_id() == cpu ||
- !(atomic_add_return(0, &rdtp->dynticks) & 0x1) ||
+ !(rdp->exp_dynticks_snap & 0x1) ||
!(rnp->qsmaskinitnext & rdp->grpmask))
mask_ofl_test |= rdp->grpmask;
}
@@ -377,9 +379,17 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
/* IPI the remaining CPUs for expedited quiescent state. */
for_each_leaf_node_possible_cpu(rnp, cpu) {
unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
+ struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+ struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
if (!(mask_ofl_ipi & mask))
continue;
retry_ipi:
+ if (atomic_add_return(0, &rdtp->dynticks) !=
+ rdp->exp_dynticks_snap) {
+ mask_ofl_test |= mask;
+ continue;
+ }
ret = smp_call_function_single(cpu, func, rsp, 0);
if (!ret) {
mask_ofl_ipi &= ~mask;
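The expedited grace-period path now snapshots each CPU's dynticks counter up front and, just before sending the IPI, re-reads it: if the counter moved, the CPU went through an idle transition in the meantime and the IPI can be skipped. A minimal userspace sketch of that snapshot-and-recheck idea using C11 atomics (the counter convention mirrors rcu_dynticks, even meaning idle and odd meaning active; everything else is illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long dynticks;

static unsigned long snapshot_dynticks(void)
{
	return atomic_load(&dynticks);
}

/* An IPI is only needed if the CPU was active at snapshot time and has not
 * changed state since. */
static bool needs_ipi(unsigned long snap)
{
	unsigned long cur = atomic_load(&dynticks);

	if (!(snap & 0x1))	/* idle at snapshot time: already quiescent */
		return false;
	if (cur != snap)	/* at least one idle transition since */
		return false;
	return true;
}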
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c95fbcd9fe1f..966556ebdbb3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -75,11 +75,11 @@
#include <linux/compiler.h>
#include <linux/frame.h>
#include <linux/prefetch.h>
+#include <linux/mutex.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
-#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif
@@ -1995,14 +1995,15 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
* @state: the mask of task states that can be woken
* @wake_flags: wake modifier flags (WF_*)
*
- * Put it on the run-queue if it's not already there. The "current"
- * thread is always on the run-queue (except when the actual
- * re-schedule is in progress), and as such you're allowed to do
- * the simpler "current->state = TASK_RUNNING" to mark yourself
- * runnable without the overhead of this.
+ * If (@state & @p->state) @p->state = TASK_RUNNING.
*
- * Return: %true if @p was woken up, %false if it was already running.
- * or @state didn't match @p's state.
+ * If the task was not queued/runnable, also place it back on a runqueue.
+ *
+ * Atomic against schedule() which would dequeue a task, also see
+ * set_current_state().
+ *
+ * Return: %true if @p->state changes (an actual wakeup was done),
+ * %false otherwise.
*/
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
@@ -5708,7 +5709,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
printk(KERN_CONT " %*pbl",
cpumask_pr_args(sched_group_cpus(group)));
if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
- printk(KERN_CONT " (cpu_capacity = %d)",
+ printk(KERN_CONT " (cpu_capacity = %lu)",
group->sgc->capacity);
}
@@ -6185,6 +6186,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
* die on a /0 trap.
*/
sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span);
+ sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
/*
* Make sure the first group of this domain contains the
@@ -6302,7 +6304,22 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
WARN_ON(!sg);
do {
+ int cpu, max_cpu = -1;
+
sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+
+ if (!(sd->flags & SD_ASYM_PACKING))
+ goto next;
+
+ for_each_cpu(cpu, sched_group_cpus(sg)) {
+ if (max_cpu < 0)
+ max_cpu = cpu;
+ else if (sched_asym_prefer(cpu, max_cpu))
+ max_cpu = cpu;
+ }
+ sg->asym_prefer_cpu = max_cpu;
+
+next:
sg = sg->next;
} while (sg != sd->groups);
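Each group now caches asym_prefer_cpu, the CPU with the highest arch-defined priority; the weak default arch_asym_cpu_priority() (see the fair.c hunk further down) returns -cpu, which preserves the old "lowest CPU number wins" ASYM_PACKING behaviour. A small standalone sketch of that selection, where pick_asym_prefer_cpu() and the sample group are illustrative, not kernel code:

#include <stdio.h>

/* Weak default from the patch: lower-numbered CPUs have higher priority. */
static int arch_asym_cpu_priority(int cpu)
{
	return -cpu;
}

static int sched_asym_prefer(int a, int b)
{
	return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}

/* Pick the highest-priority CPU of a group, as init_sched_groups_capacity() now does. */
static int pick_asym_prefer_cpu(const int *cpus, int nr)
{
	int i, max_cpu = -1;

	for (i = 0; i < nr; i++) {
		if (max_cpu < 0 || sched_asym_prefer(cpus[i], max_cpu))
			max_cpu = cpus[i];
	}
	return max_cpu;
}

int main(void)
{
	int group[] = { 4, 5, 6, 7 };

	printf("asym_prefer_cpu = %d\n",
	       pick_asym_prefer_cpu(group, sizeof(group) / sizeof(group[0])));
	return 0;
}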
@@ -7603,6 +7620,7 @@ void __init sched_init(void)
#ifdef CONFIG_FAIR_GROUP_SCHED
root_task_group.shares = ROOT_TASK_GROUP_LOAD;
INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
+ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
/*
* How much cpu bandwidth does root_task_group get?
*
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index bc0b309c3f19..9add206b5608 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -297,7 +297,7 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v)
for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
seq_printf(sf, "%s %lld\n",
cpuacct_stat_desc[stat],
- cputime64_to_clock_t(val[stat]));
+ (long long)cputime64_to_clock_t(val[stat]));
}
return 0;
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 5ebee3164e64..7700a9cba335 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -128,16 +128,13 @@ static inline void task_group_account_field(struct task_struct *p, int index,
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
*/
-void account_user_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled)
+void account_user_time(struct task_struct *p, cputime_t cputime)
{
int index;
/* Add user time to process. */
p->utime += cputime;
- p->utimescaled += cputime_scaled;
account_group_user_time(p, cputime);
index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
@@ -153,16 +150,13 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
* Account guest cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in virtual machine since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
*/
-static void account_guest_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled)
+static void account_guest_time(struct task_struct *p, cputime_t cputime)
{
u64 *cpustat = kcpustat_this_cpu->cpustat;
/* Add guest time to process. */
p->utime += cputime;
- p->utimescaled += cputime_scaled;
account_group_user_time(p, cputime);
p->gtime += cputime;
@@ -180,16 +174,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
* Account system cpu time to a process and desired cpustat field
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in kernel space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
- * @target_cputime64: pointer to cpustat field that has to be updated
+ * @index: pointer to cpustat field that has to be updated
*/
static inline
-void __account_system_time(struct task_struct *p, cputime_t cputime,
- cputime_t cputime_scaled, int index)
+void __account_system_time(struct task_struct *p, cputime_t cputime, int index)
{
/* Add system time to process. */
p->stime += cputime;
- p->stimescaled += cputime_scaled;
account_group_system_time(p, cputime);
/* Add system time to cpustat. */
@@ -204,15 +195,14 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
* @cputime: the cpu time spent in kernel space since the last update
- * @cputime_scaled: cputime scaled by cpu frequency
*/
void account_system_time(struct task_struct *p, int hardirq_offset,
- cputime_t cputime, cputime_t cputime_scaled)
+ cputime_t cputime)
{
int index;
if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
- account_guest_time(p, cputime, cputime_scaled);
+ account_guest_time(p, cputime);
return;
}
@@ -223,7 +213,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
else
index = CPUTIME_SYSTEM;
- __account_system_time(p, cputime, cputime_scaled, index);
+ __account_system_time(p, cputime, index);
}
/*
@@ -390,7 +380,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
struct rq *rq, int ticks)
{
u64 cputime = (__force u64) cputime_one_jiffy * ticks;
- cputime_t scaled, other;
+ cputime_t other;
/*
* When returning from idle, many ticks can get accounted at
@@ -403,7 +393,6 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
if (other >= cputime)
return;
cputime -= other;
- scaled = cputime_to_scaled(cputime);
if (this_cpu_ksoftirqd() == p) {
/*
@@ -411,15 +400,15 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
* So, we have to handle it separately here.
* Also, p->stime needs to be updated for ksoftirqd.
*/
- __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
+ __account_system_time(p, cputime, CPUTIME_SOFTIRQ);
} else if (user_tick) {
- account_user_time(p, cputime, scaled);
+ account_user_time(p, cputime);
} else if (p == rq->idle) {
account_idle_time(cputime);
} else if (p->flags & PF_VCPU) { /* System time or guest time */
- account_guest_time(p, cputime, scaled);
+ account_guest_time(p, cputime);
} else {
- __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
+ __account_system_time(p, cputime, CPUTIME_SYSTEM);
}
}
@@ -502,7 +491,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
- cputime_t cputime, scaled, steal;
+ cputime_t cputime, steal;
struct rq *rq = this_rq();
if (vtime_accounting_cpu_enabled())
@@ -520,12 +509,11 @@ void account_process_tick(struct task_struct *p, int user_tick)
return;
cputime -= steal;
- scaled = cputime_to_scaled(cputime);
if (user_tick)
- account_user_time(p, cputime, scaled);
+ account_user_time(p, cputime);
else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
- account_system_time(p, HARDIRQ_OFFSET, cputime, scaled);
+ account_system_time(p, HARDIRQ_OFFSET, cputime);
else
account_idle_time(cputime);
}
@@ -746,7 +734,7 @@ static void __vtime_account_system(struct task_struct *tsk)
{
cputime_t delta_cpu = get_vtime_delta(tsk);
- account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
+ account_system_time(tsk, irq_count(), delta_cpu);
}
void vtime_account_system(struct task_struct *tsk)
@@ -767,7 +755,7 @@ void vtime_account_user(struct task_struct *tsk)
tsk->vtime_snap_whence = VTIME_SYS;
if (vtime_delta(tsk)) {
delta_cpu = get_vtime_delta(tsk);
- account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
+ account_user_time(tsk, delta_cpu);
}
write_seqcount_end(&tsk->vtime_seqcount);
}
@@ -863,29 +851,25 @@ cputime_t task_gtime(struct task_struct *t)
* add up the pending nohz execution time since the last
* cputime snapshot.
*/
-static void
-fetch_task_cputime(struct task_struct *t,
- cputime_t *u_dst, cputime_t *s_dst,
- cputime_t *u_src, cputime_t *s_src,
- cputime_t *udelta, cputime_t *sdelta)
+void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
+ cputime_t delta;
unsigned int seq;
- unsigned long long delta;
- do {
- *udelta = 0;
- *sdelta = 0;
+ if (!vtime_accounting_enabled()) {
+ *utime = t->utime;
+ *stime = t->stime;
+ return;
+ }
+ do {
seq = read_seqcount_begin(&t->vtime_seqcount);
- if (u_dst)
- *u_dst = *u_src;
- if (s_dst)
- *s_dst = *s_src;
+ *utime = t->utime;
+ *stime = t->stime;
/* Task is sleeping, nothing to add */
- if (t->vtime_snap_whence == VTIME_INACTIVE ||
- is_idle_task(t))
+ if (t->vtime_snap_whence == VTIME_INACTIVE || is_idle_task(t))
continue;
delta = vtime_delta(t);
@@ -894,54 +878,10 @@ fetch_task_cputime(struct task_struct *t,
* Task runs either in user or kernel space, add pending nohz time to
* the right place.
*/
- if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
- *udelta = delta;
- } else {
- if (t->vtime_snap_whence == VTIME_SYS)
- *sdelta = delta;
- }
+ if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU)
+ *utime += delta;
+ else if (t->vtime_snap_whence == VTIME_SYS)
+ *stime += delta;
} while (read_seqcount_retry(&t->vtime_seqcount, seq));
}
-
-
-void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
-{
- cputime_t udelta, sdelta;
-
- if (!vtime_accounting_enabled()) {
- if (utime)
- *utime = t->utime;
- if (stime)
- *stime = t->stime;
- return;
- }
-
- fetch_task_cputime(t, utime, stime, &t->utime,
- &t->stime, &udelta, &sdelta);
- if (utime)
- *utime += udelta;
- if (stime)
- *stime += sdelta;
-}
-
-void task_cputime_scaled(struct task_struct *t,
- cputime_t *utimescaled, cputime_t *stimescaled)
-{
- cputime_t udelta, sdelta;
-
- if (!vtime_accounting_enabled()) {
- if (utimescaled)
- *utimescaled = t->utimescaled;
- if (stimescaled)
- *stimescaled = t->stimescaled;
- return;
- }
-
- fetch_task_cputime(t, utimescaled, stimescaled,
- &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
- if (utimescaled)
- *utimescaled += cputime_to_scaled(udelta);
- if (stimescaled)
- *stimescaled += cputime_to_scaled(sdelta);
-}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 37e2449186c4..70ef2b1901e4 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -586,7 +586,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
/*
* The task might have changed its scheduling policy to something
- * different than SCHED_DEADLINE (through switched_fromd_dl()).
+ * different than SCHED_DEADLINE (through switched_from_dl()).
*/
if (!dl_task(p)) {
__dl_clear_params(p);
@@ -1137,7 +1137,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct pin_cookie coo
pull_dl_task(rq);
lockdep_repin_lock(&rq->lock, cookie);
/*
- * pull_rt_task() can drop (and re-acquire) rq->lock; this
+ * pull_dl_task() can drop (and re-acquire) rq->lock; this
* means a stop task can slip in, in which case we need to
* re-start task selection.
*/
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c242944f5cbd..18d9e75f1f6e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -37,7 +37,6 @@
/*
* Targeted preemption latency for CPU-bound tasks:
- * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
*
* NOTE: this latency value is not the same as the concept of
* 'timeslice length' - timeslices in CFS are of variable length
@@ -46,31 +45,35 @@
*
* (to see the precise effective timeslice length of your workload,
* run vmstat and monitor the context-switches (cs) field)
+ *
+ * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_latency = 6000000ULL;
-unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+unsigned int sysctl_sched_latency = 6000000ULL;
+unsigned int normalized_sysctl_sched_latency = 6000000ULL;
/*
* The initial- and re-scaling of tunables is configurable
- * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
*
* Options are:
- * SCHED_TUNABLESCALING_NONE - unscaled, always *1
- * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
- * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
+ *
+ * SCHED_TUNABLESCALING_NONE - unscaled, always *1
+ * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
+ * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
+ *
+ * (default: SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
*/
-enum sched_tunable_scaling sysctl_sched_tunable_scaling
- = SCHED_TUNABLESCALING_LOG;
+enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
/*
* Minimal preemption granularity for CPU-bound tasks:
+ *
* (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity = 750000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
/*
- * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
+ * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
*/
static unsigned int sched_nr_latency = 8;
@@ -82,23 +85,27 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
/*
* SCHED_OTHER wake-up granularity.
- * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
+ *
+ * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
-unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
-const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
+#ifdef CONFIG_SMP
/*
- * The exponential sliding window over which load is averaged for shares
- * distribution.
- * (default: 10msec)
+ * For asym packing, by default the lower numbered cpu has higher priority.
*/
-unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
+int __weak arch_asym_cpu_priority(int cpu)
+{
+ return -cpu;
+}
+#endif
#ifdef CONFIG_CFS_BANDWIDTH
/*
@@ -109,16 +116,18 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
* to consumption or the quota being specified to be smaller than the slice)
* we will always only issue the remaining available time.
*
- * default: 5 msec, units: microseconds
- */
-unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
+ * (default: 5 msec, units: microseconds)
+ */
+unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
/*
* The margin used when comparing utilization with CPU capacity:
- * util * 1024 < capacity * margin
+ * util * margin < capacity * 1024
+ *
+ * (default: ~20%)
*/
-unsigned int capacity_margin = 1280; /* ~20% */
+unsigned int capacity_margin = 1280;
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
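The corrected comment states the margin on the utilization side: a task fits a CPU when util * margin < capacity * 1024, and with capacity_margin = 1280 that leaves roughly 20% headroom. A worked standalone sketch (task_fits_capacity() is an illustrative name, not the kernel helper):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
static unsigned int capacity_margin = 1280;	/* ~20% headroom */

/* util * margin < capacity * 1024, the corrected form of the comparison. */
static int task_fits_capacity(unsigned long util, unsigned long capacity)
{
	return util * capacity_margin < capacity * SCHED_CAPACITY_SCALE;
}

int main(void)
{
	/* A task at 800/1024 utilization fits a 1024-capacity CPU but not a 900-capacity one. */
	printf("%d %d\n", task_fits_capacity(800, 1024), task_fits_capacity(800, 900));
	return 0;
}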
@@ -290,19 +299,59 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
if (!cfs_rq->on_list) {
+ struct rq *rq = rq_of(cfs_rq);
+ int cpu = cpu_of(rq);
/*
* Ensure we either appear before our parent (if already
* enqueued) or force our parent to appear after us when it is
- * enqueued. The fact that we always enqueue bottom-up
- * reduces this to two cases.
+ * enqueued. The fact that we always enqueue bottom-up
+ * reduces this to two cases and a special case for the root
+ * cfs_rq. Furthermore, it also means that we will always reset
+ * tmp_alone_branch either when the branch is connected
+ * to a tree or when we reach the beginning of the tree
*/
if (cfs_rq->tg->parent &&
- cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
- list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
- &rq_of(cfs_rq)->leaf_cfs_rq_list);
- } else {
+ cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
+ /*
+ * If parent is already on the list, we add the child
+ * just before. Thanks to circular linked property of
+ * the list, this means to put the child at the tail
+ * of the list that starts by parent.
+ */
+ list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+ &(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
+ /*
+ * The branch is now connected to its tree so we can
+ * reset tmp_alone_branch to the beginning of the
+ * list.
+ */
+ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+ } else if (!cfs_rq->tg->parent) {
+ /*
+ * cfs rq without parent should be put
+ * at the tail of the list.
+ */
list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
- &rq_of(cfs_rq)->leaf_cfs_rq_list);
+ &rq->leaf_cfs_rq_list);
+ /*
+ * We have reached the beginning of a tree so we can reset
+ * tmp_alone_branch to the beginning of the list.
+ */
+ rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+ } else {
+ /*
+ * The parent has not been added yet so we want to
+ * make sure that it will be put after us.
+ * tmp_alone_branch points to the beginning of the branch
+ * where we will add parent.
+ */
+ list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
+ rq->tmp_alone_branch);
+ /*
+ * update tmp_alone_branch to point to the new beginning
+ * of the branch.
+ */
+ rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
}
cfs_rq->on_list = 1;
@@ -708,9 +757,7 @@ void init_entity_runnable_average(struct sched_entity *se)
}
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
-static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+static void attach_entity_cfs_rq(struct sched_entity *se);
/*
* With new tasks being created, their initial util_avgs are extrapolated
@@ -742,7 +789,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
- u64 now = cfs_rq_clock_task(cfs_rq);
if (cap > 0) {
if (cfs_rq->avg.util_avg != 0) {
@@ -770,14 +816,12 @@ void post_init_entity_util_avg(struct sched_entity *se)
* such that the next switched_to_fair() has the
* expected state.
*/
- se->avg.last_update_time = now;
+ se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
return;
}
}
- update_cfs_rq_load_avg(now, cfs_rq, false);
- attach_entity_load_avg(cfs_rq, se);
- update_tg_load_avg(cfs_rq, false);
+ attach_entity_cfs_rq(se);
}
#else /* !CONFIG_SMP */
@@ -2890,6 +2934,26 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
return decayed;
}
+/*
+ * Signed add and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define add_positive(_ptr, _val) do { \
+ typeof(_ptr) ptr = (_ptr); \
+ typeof(_val) val = (_val); \
+ typeof(*ptr) res, var = READ_ONCE(*ptr); \
+ \
+ res = var + val; \
+ \
+ if (val < 0 && res > var) \
+ res = 0; \
+ \
+ WRITE_ONCE(*ptr, res); \
+} while (0)
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/**
* update_tg_load_avg - update the tg's load avg
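add_positive() is a signed add that clamps at zero on underflow and goes through a local variable so a lockless reader can never observe a transiently negative value. A userspace sketch of the same shape, with READ_ONCE()/WRITE_ONCE() replaced by plain accesses (which drops the lockless-observer guarantee, so this only illustrates the clamping):

#include <stdio.h>

#define add_positive(_ptr, _val) do {					\
	__typeof__(_ptr) ptr = (_ptr);					\
	__typeof__(_val) val = (_val);					\
	__typeof__(*ptr) res, var = *ptr;				\
									\
	res = var + val;						\
									\
	if (val < 0 && res > var)	/* unsigned wrap => underflow */\
		res = 0;						\
									\
	*ptr = res;							\
} while (0)

int main(void)
{
	unsigned long load = 100;

	add_positive(&load, -40);	/* 100 - 40 = 60 */
	add_positive(&load, -400);	/* would underflow, clamped to 0 */
	printf("%lu\n", load);
	return 0;
}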
@@ -2969,8 +3033,138 @@ void set_task_rq_fair(struct sched_entity *se,
se->avg.last_update_time = n_last_update_time;
}
}
+
+/* Take into account change of utilization of a child task group */
+static inline void
+update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+ long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
+
+ /* Nothing to update */
+ if (!delta)
+ return;
+
+ /* Set new sched_entity's utilization */
+ se->avg.util_avg = gcfs_rq->avg.util_avg;
+ se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
+
+ /* Update parent cfs_rq utilization */
+ add_positive(&cfs_rq->avg.util_avg, delta);
+ cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
+}
+
+/* Take into account change of load of a child task group */
+static inline void
+update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+ struct cfs_rq *gcfs_rq = group_cfs_rq(se);
+ long delta, load = gcfs_rq->avg.load_avg;
+
+ /*
+ * If the load of group cfs_rq is null, the load of the
+ * sched_entity will also be null so we can skip the formula
+ */
+ if (load) {
+ long tg_load;
+
+ /* Get tg's load and ensure tg_load > 0 */
+ tg_load = atomic_long_read(&gcfs_rq->tg->load_avg) + 1;
+
+ /* Ensure tg_load >= load and is updated with the current load */
+ tg_load -= gcfs_rq->tg_load_avg_contrib;
+ tg_load += load;
+
+ /*
+ * We need to compute a correction term in the case that the
+ * task group is consuming more CPU than a task of equal
+ * weight. A task with a weight equals to tg->shares will have
+ * a load less or equal to scale_load_down(tg->shares).
+ * Similarly, the sched_entities that represent the task group
+ * at parent level, can't have a load higher than
+ * scale_load_down(tg->shares). And the Sum of sched_entities'
+ * load must be <= scale_load_down(tg->shares).
+ */
+ if (tg_load > scale_load_down(gcfs_rq->tg->shares)) {
+ /* scale gcfs_rq's load into tg's shares */
+ load *= scale_load_down(gcfs_rq->tg->shares);
+ load /= tg_load;
+ }
+ }
+
+ delta = load - se->avg.load_avg;
+
+ /* Nothing to update */
+ if (!delta)
+ return;
+
+ /* Set new sched_entity's load */
+ se->avg.load_avg = load;
+ se->avg.load_sum = se->avg.load_avg * LOAD_AVG_MAX;
+
+ /* Update parent cfs_rq load */
+ add_positive(&cfs_rq->avg.load_avg, delta);
+ cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * LOAD_AVG_MAX;
+
+ /*
+ * If the sched_entity is already enqueued, we also have to update the
+ * runnable load avg.
+ */
+ if (se->on_rq) {
+ /* Update parent cfs_rq runnable_load_avg */
+ add_positive(&cfs_rq->runnable_load_avg, delta);
+ cfs_rq->runnable_load_sum = cfs_rq->runnable_load_avg * LOAD_AVG_MAX;
+ }
+}
+
+static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq)
+{
+ cfs_rq->propagate_avg = 1;
+}
+
+static inline int test_and_clear_tg_cfs_propagate(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = group_cfs_rq(se);
+
+ if (!cfs_rq->propagate_avg)
+ return 0;
+
+ cfs_rq->propagate_avg = 0;
+ return 1;
+}
+
+/* Update task and its cfs_rq load average */
+static inline int propagate_entity_load_avg(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq;
+
+ if (entity_is_task(se))
+ return 0;
+
+ if (!test_and_clear_tg_cfs_propagate(se))
+ return 0;
+
+ cfs_rq = cfs_rq_of(se);
+
+ set_tg_cfs_propagate(cfs_rq);
+
+ update_tg_cfs_util(cfs_rq, se);
+ update_tg_cfs_load(cfs_rq, se);
+
+ return 1;
+}
+
#else /* CONFIG_FAIR_GROUP_SCHED */
+
static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
+
+static inline int propagate_entity_load_avg(struct sched_entity *se)
+{
+ return 0;
+}
+
+static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
+
#endif /* CONFIG_FAIR_GROUP_SCHED */
static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
@@ -3041,6 +3235,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
sub_positive(&sa->load_avg, r);
sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
removed_load = 1;
+ set_tg_cfs_propagate(cfs_rq);
}
if (atomic_long_read(&cfs_rq->removed_util_avg)) {
@@ -3048,6 +3243,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
sub_positive(&sa->util_avg, r);
sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
removed_util = 1;
+ set_tg_cfs_propagate(cfs_rq);
}
decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
@@ -3064,23 +3260,35 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
return decayed || removed_load;
}
+/*
+ * Optional action to be done while updating the load average
+ */
+#define UPDATE_TG 0x1
+#define SKIP_AGE_LOAD 0x2
+
/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct sched_entity *se, int update_tg)
+static inline void update_load_avg(struct sched_entity *se, int flags)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 now = cfs_rq_clock_task(cfs_rq);
struct rq *rq = rq_of(cfs_rq);
int cpu = cpu_of(rq);
+ int decayed;
/*
* Track task load average for carrying it to new CPU after migrated, and
* track group sched_entity load average for task_h_load calc in migration
*/
- __update_load_avg(now, cpu, &se->avg,
+ if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) {
+ __update_load_avg(now, cpu, &se->avg,
se->on_rq * scale_load_down(se->load.weight),
cfs_rq->curr == se, NULL);
+ }
+
+ decayed = update_cfs_rq_load_avg(now, cfs_rq, true);
+ decayed |= propagate_entity_load_avg(se);
- if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg)
+ if (decayed && (flags & UPDATE_TG))
update_tg_load_avg(cfs_rq, 0);
}
@@ -3094,31 +3302,12 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
*/
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- if (!sched_feat(ATTACH_AGE_LOAD))
- goto skip_aging;
-
- /*
- * If we got migrated (either between CPUs or between cgroups) we'll
- * have aged the average right before clearing @last_update_time.
- *
- * Or we're fresh through post_init_entity_util_avg().
- */
- if (se->avg.last_update_time) {
- __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
- &se->avg, 0, 0, NULL);
-
- /*
- * XXX: we could have just aged the entire load away if we've been
- * absent from the fair class for too long.
- */
- }
-
-skip_aging:
se->avg.last_update_time = cfs_rq->avg.last_update_time;
cfs_rq->avg.load_avg += se->avg.load_avg;
cfs_rq->avg.load_sum += se->avg.load_sum;
cfs_rq->avg.util_avg += se->avg.util_avg;
cfs_rq->avg.util_sum += se->avg.util_sum;
+ set_tg_cfs_propagate(cfs_rq);
cfs_rq_util_change(cfs_rq);
}
@@ -3133,14 +3322,12 @@ skip_aging:
*/
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
- &se->avg, se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL);
sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+ set_tg_cfs_propagate(cfs_rq);
cfs_rq_util_change(cfs_rq);
}
@@ -3150,34 +3337,20 @@ static inline void
enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
struct sched_avg *sa = &se->avg;
- u64 now = cfs_rq_clock_task(cfs_rq);
- int migrated, decayed;
-
- migrated = !sa->last_update_time;
- if (!migrated) {
- __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
- se->on_rq * scale_load_down(se->load.weight),
- cfs_rq->curr == se, NULL);
- }
-
- decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated);
cfs_rq->runnable_load_avg += sa->load_avg;
cfs_rq->runnable_load_sum += sa->load_sum;
- if (migrated)
+ if (!sa->last_update_time) {
attach_entity_load_avg(cfs_rq, se);
-
- if (decayed || migrated)
update_tg_load_avg(cfs_rq, 0);
+ }
}
/* Remove the runnable load generated by se from cfs_rq's runnable load average */
static inline void
dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- update_load_avg(se, 1);
-
cfs_rq->runnable_load_avg =
max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
cfs_rq->runnable_load_sum =
@@ -3206,13 +3379,25 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
#endif
/*
+ * Synchronize entity load avg of dequeued entity without locking
+ * the previous rq.
+ */
+void sync_entity_load_avg(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ u64 last_update_time;
+
+ last_update_time = cfs_rq_last_update_time(cfs_rq);
+ __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+}
+
+/*
* Task first catches up with cfs_rq, and then subtract
* itself from the cfs_rq (task must be off the queue now).
*/
void remove_entity_load_avg(struct sched_entity *se)
{
struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 last_update_time;
/*
* tasks cannot exit without having gone through wake_up_new_task() ->
@@ -3224,9 +3409,7 @@ void remove_entity_load_avg(struct sched_entity *se)
* calls this.
*/
- last_update_time = cfs_rq_last_update_time(cfs_rq);
-
- __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+ sync_entity_load_avg(se);
atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
}
@@ -3251,7 +3434,10 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
return 0;
}
-static inline void update_load_avg(struct sched_entity *se, int not_used)
+#define UPDATE_TG 0x0
+#define SKIP_AGE_LOAD 0x0
+
+static inline void update_load_avg(struct sched_entity *se, int not_used1)
{
cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
}
@@ -3396,6 +3582,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;
+ update_load_avg(se, UPDATE_TG);
enqueue_entity_load_avg(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);
update_cfs_shares(cfs_rq);
@@ -3470,6 +3657,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* Update run-time statistics of the 'current'.
*/
update_curr(cfs_rq);
+ update_load_avg(se, UPDATE_TG);
dequeue_entity_load_avg(cfs_rq, se);
update_stats_dequeue(cfs_rq, se, flags);
@@ -3557,7 +3745,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
update_stats_wait_end(cfs_rq, se);
__dequeue_entity(cfs_rq, se);
- update_load_avg(se, 1);
+ update_load_avg(se, UPDATE_TG);
}
update_stats_curr_start(cfs_rq, se);
@@ -3675,7 +3863,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
/*
* Ensure that runnable average is periodically updated.
*/
- update_load_avg(curr, 1);
+ update_load_avg(curr, UPDATE_TG);
update_cfs_shares(cfs_rq);
#ifdef CONFIG_SCHED_HRTICK
@@ -4572,7 +4760,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, 1);
+ update_load_avg(se, UPDATE_TG);
update_cfs_shares(cfs_rq);
}
@@ -4631,7 +4819,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, 1);
+ update_load_avg(se, UPDATE_TG);
update_cfs_shares(cfs_rq);
}
@@ -5199,6 +5387,14 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
return 1;
}
+static inline int task_util(struct task_struct *p);
+static int cpu_util_wake(int cpu, struct task_struct *p);
+
+static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
+{
+ return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
+}
+
/*
* find_idlest_group finds and returns the least busy CPU group within the
* domain.
@@ -5208,7 +5404,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int sd_flag)
{
struct sched_group *idlest = NULL, *group = sd->groups;
+ struct sched_group *most_spare_sg = NULL;
unsigned long min_load = ULONG_MAX, this_load = 0;
+ unsigned long most_spare = 0, this_spare = 0;
int load_idx = sd->forkexec_idx;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -5216,7 +5414,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
load_idx = sd->wake_idx;
do {
- unsigned long load, avg_load;
+ unsigned long load, avg_load, spare_cap, max_spare_cap;
int local_group;
int i;
@@ -5228,8 +5426,12 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
local_group = cpumask_test_cpu(this_cpu,
sched_group_cpus(group));
- /* Tally up the load of all CPUs in the group */
+ /*
+ * Tally up the load of all CPUs in the group and find
+ * the group containing the CPU with most spare capacity.
+ */
avg_load = 0;
+ max_spare_cap = 0;
for_each_cpu(i, sched_group_cpus(group)) {
/* Bias balancing toward cpus of our domain */
@@ -5239,6 +5441,11 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
load = target_load(i, load_idx);
avg_load += load;
+
+ spare_cap = capacity_spare_wake(i, p);
+
+ if (spare_cap > max_spare_cap)
+ max_spare_cap = spare_cap;
}
/* Adjust by relative CPU capacity of the group */
@@ -5246,12 +5453,33 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
if (local_group) {
this_load = avg_load;
- } else if (avg_load < min_load) {
- min_load = avg_load;
- idlest = group;
+ this_spare = max_spare_cap;
+ } else {
+ if (avg_load < min_load) {
+ min_load = avg_load;
+ idlest = group;
+ }
+
+ if (most_spare < max_spare_cap) {
+ most_spare = max_spare_cap;
+ most_spare_sg = group;
+ }
}
} while (group = group->next, group != sd->groups);
+ /*
+ * The cross-over point between using spare capacity or least load
+ * is too conservative for high utilization tasks on partially
+ * utilized systems if we require spare_capacity > task_util(p),
+ * so we allow for some task stuffing by using
+ * spare_capacity > task_util(p)/2.
+ */
+ if (this_spare > task_util(p) / 2 &&
+ imbalance*this_spare > 100*most_spare)
+ return NULL;
+ else if (most_spare > task_util(p) / 2)
+ return most_spare_sg;
+
if (!idlest || 100*this_load < imbalance*min_load)
return NULL;
return idlest;
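find_idlest_group() now tracks, per group, both the average load and the largest spare capacity of any CPU, and it prefers the most-spare group whenever the waking task would still fit with task_util(p)/2 slack; only otherwise does it fall back to the least-loaded group. A condensed standalone sketch of that decision order (the struct, the numbers and pick_group() are illustrative):

#include <stdio.h>

struct group_stats {
	unsigned long avg_load;
	unsigned long max_spare_cap;	/* largest spare capacity on any CPU in the group */
};

/* 0: stay in the local group, 1: use the most-spare group, 2: use the least-loaded group. */
static int pick_group(const struct group_stats *local,
		      const struct group_stats *most_spare_sg,
		      const struct group_stats *idlest,
		      unsigned long task_util, unsigned long imbalance_pct)
{
	unsigned long this_spare = local->max_spare_cap;
	unsigned long most_spare = most_spare_sg ? most_spare_sg->max_spare_cap : 0;

	if (this_spare > task_util / 2 &&
	    imbalance_pct * this_spare > 100 * most_spare)
		return 0;			/* enough headroom locally */
	if (most_spare > task_util / 2)
		return 1;			/* go for spare capacity */
	if (!idlest || 100 * local->avg_load < imbalance_pct * idlest->avg_load)
		return 0;			/* no sufficiently better group */
	return 2;				/* fall back to least load */
}

int main(void)
{
	struct group_stats local = { .avg_load = 900, .max_spare_cap = 100 };
	struct group_stats spare = { .avg_load = 700, .max_spare_cap = 600 };

	printf("%d\n", pick_group(&local, &spare, &spare, 400, 117));	/* prints 1 */
	return 0;
}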
@@ -5590,6 +5818,24 @@ static inline int task_util(struct task_struct *p)
}
/*
+ * cpu_util_wake: Compute cpu utilization with any contributions from
+ * the waking task p removed.
+ */
+static int cpu_util_wake(int cpu, struct task_struct *p)
+{
+ unsigned long util, capacity;
+
+ /* Task has no contribution or is new */
+ if (cpu != task_cpu(p) || !p->se.avg.last_update_time)
+ return cpu_util(cpu);
+
+ capacity = capacity_orig_of(cpu);
+ util = max_t(long, cpu_rq(cpu)->cfs.avg.util_avg - task_util(p), 0);
+
+ return (util >= capacity) ? capacity : util;
+}
+
+/*
* Disable WAKE_AFFINE in the case where task @p doesn't fit in the
* capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
*
@@ -5607,6 +5853,9 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
if (max_cap - min_cap < max_cap >> 3)
return 0;
+ /* Bring task utilization in sync with prev_cpu */
+ sync_entity_load_avg(&p->se);
+
return min_cap * 1024 < task_util(p) * capacity_margin;
}
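cpu_util_wake() removes the waking task's own contribution from its previous CPU's utilization so the capacity check in wake_cap() is not biased by the task itself, and sync_entity_load_avg() brings that contribution up to date first. A short numeric sketch of the subtraction and clamp (names and values are illustrative):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024

/* CPU utilization with the waking task's contribution removed, clamped to capacity. */
static unsigned long cpu_util_wake_sketch(unsigned long cpu_util, unsigned long task_util,
					  unsigned long capacity_orig)
{
	unsigned long util = cpu_util > task_util ? cpu_util - task_util : 0;

	return util >= capacity_orig ? capacity_orig : util;
}

int main(void)
{
	/* A CPU at 700/1024 utilization, 300 of which is the waking task itself. */
	printf("%lu\n", cpu_util_wake_sketch(700, 300, SCHED_CAPACITY_SCALE));	/* 400 */
	return 0;
}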
@@ -6641,6 +6890,10 @@ static void update_blocked_averages(int cpu)
if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
update_tg_load_avg(cfs_rq, 0);
+
+ /* Propagate pending load changes to the parent */
+ if (cfs_rq->tg->se[cpu])
+ update_load_avg(cfs_rq->tg->se[cpu], 0);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -6845,13 +7098,14 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
cpu_rq(cpu)->cpu_capacity = capacity;
sdg->sgc->capacity = capacity;
+ sdg->sgc->min_capacity = capacity;
}
void update_group_capacity(struct sched_domain *sd, int cpu)
{
struct sched_domain *child = sd->child;
struct sched_group *group, *sdg = sd->groups;
- unsigned long capacity;
+ unsigned long capacity, min_capacity;
unsigned long interval;
interval = msecs_to_jiffies(sd->balance_interval);
@@ -6864,6 +7118,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
}
capacity = 0;
+ min_capacity = ULONG_MAX;
if (child->flags & SD_OVERLAP) {
/*
@@ -6888,11 +7143,12 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
*/
if (unlikely(!rq->sd)) {
capacity += capacity_of(cpu);
- continue;
+ } else {
+ sgc = rq->sd->groups->sgc;
+ capacity += sgc->capacity;
}
- sgc = rq->sd->groups->sgc;
- capacity += sgc->capacity;
+ min_capacity = min(capacity, min_capacity);
}
} else {
/*
@@ -6902,12 +7158,16 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
group = child->groups;
do {
- capacity += group->sgc->capacity;
+ struct sched_group_capacity *sgc = group->sgc;
+
+ capacity += sgc->capacity;
+ min_capacity = min(sgc->min_capacity, min_capacity);
group = group->next;
} while (group != child->groups);
}
sdg->sgc->capacity = capacity;
+ sdg->sgc->min_capacity = min_capacity;
}
/*
@@ -6930,8 +7190,8 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
* cpumask covering 1 cpu of the first group and 3 cpus of the second group.
* Something like:
*
- * { 0 1 2 3 } { 4 5 6 7 }
- * * * * *
+ * { 0 1 2 3 } { 4 5 6 7 }
+ * * * * *
*
* If we were to balance group-wise we'd place two tasks in the first group and
* two tasks in the second group. Clearly this is undesired as it will overload
@@ -7002,6 +7262,17 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
return false;
}
+/*
+ * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-CPU capacity than sched_group ref.
+ */
+static inline bool
+group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+ return sg->sgc->min_capacity * capacity_margin <
+ ref->sgc->min_capacity * 1024;
+}
+
static inline enum
group_type group_classify(struct sched_group *group,
struct sg_lb_stats *sgs)
@@ -7105,6 +7376,20 @@ static bool update_sd_pick_busiest(struct lb_env *env,
if (sgs->avg_load <= busiest->avg_load)
return false;
+ if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
+ goto asym_packing;
+
+ /*
+ * Candidate sg has no more than one task per CPU and
+ * has higher per-CPU capacity. Migrating tasks to less
+ * capable CPUs may harm throughput. Maximize throughput,
+ * power/energy consequences are not considered.
+ */
+ if (sgs->sum_nr_running <= sgs->group_weight &&
+ group_smaller_cpu_capacity(sds->local, sg))
+ return false;
+
+asym_packing:
/* This is the busiest node in its class. */
if (!(env->sd->flags & SD_ASYM_PACKING))
return true;
@@ -7113,16 +7398,18 @@ static bool update_sd_pick_busiest(struct lb_env *env,
if (env->idle == CPU_NOT_IDLE)
return true;
/*
- * ASYM_PACKING needs to move all the work to the lowest
- * numbered CPUs in the group, therefore mark all groups
- * higher than ourself as busy.
+ * ASYM_PACKING needs to move all the work to the highest
+ * priority CPUs in the group, therefore mark all groups
+ * of lower priority than ourself as busy.
*/
- if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
+ if (sgs->sum_nr_running &&
+ sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
if (!sds->busiest)
return true;
- /* Prefer to move from highest possible cpu's work */
- if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
+ /* Prefer to move from lowest priority cpu's work */
+ if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
+ sg->asym_prefer_cpu))
return true;
}
@@ -7274,8 +7561,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
if (!sds->busiest)
return 0;
- busiest_cpu = group_first_cpu(sds->busiest);
- if (env->dst_cpu > busiest_cpu)
+ busiest_cpu = sds->busiest->asym_prefer_cpu;
+ if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
return 0;
env->imbalance = DIV_ROUND_CLOSEST(
@@ -7613,10 +7900,11 @@ static int need_active_balance(struct lb_env *env)
/*
* ASYM_PACKING needs to force migrate tasks from busy but
- * higher numbered CPUs in order to pack all tasks in the
- * lowest numbered CPUs.
+ * lower priority CPUs in order to pack all tasks in the
+ * highest priority CPUs.
*/
- if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
+ if ((sd->flags & SD_ASYM_PACKING) &&
+ sched_asym_prefer(env->dst_cpu, env->src_cpu))
return 1;
}
@@ -8465,7 +8753,7 @@ static inline bool nohz_kick_needed(struct rq *rq)
unsigned long now = jiffies;
struct sched_domain_shared *sds;
struct sched_domain *sd;
- int nr_busy, cpu = rq->cpu;
+ int nr_busy, i, cpu = rq->cpu;
bool kick = false;
if (unlikely(rq->idle_balance))
@@ -8516,12 +8804,18 @@ static inline bool nohz_kick_needed(struct rq *rq)
}
sd = rcu_dereference(per_cpu(sd_asym, cpu));
- if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
- sched_domain_span(sd)) < cpu)) {
- kick = true;
- goto unlock;
- }
+ if (sd) {
+ for_each_cpu(i, sched_domain_span(sd)) {
+ if (i == cpu ||
+ !cpumask_test_cpu(i, nohz.idle_cpus_mask))
+ continue;
+ if (sched_asym_prefer(i, cpu)) {
+ kick = true;
+ goto unlock;
+ }
+ }
+ }
unlock:
rcu_read_unlock();
return kick;
@@ -8687,32 +8981,45 @@ static inline bool vruntime_normalized(struct task_struct *p)
return false;
}
-static void detach_task_cfs_rq(struct task_struct *p)
+#ifdef CONFIG_FAIR_GROUP_SCHED
+/*
+ * Propagate the changes of the sched_entity across the tg tree to make it
+ * visible to the root
+ */
+static void propagate_entity_cfs_rq(struct sched_entity *se)
{
- struct sched_entity *se = &p->se;
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 now = cfs_rq_clock_task(cfs_rq);
+ struct cfs_rq *cfs_rq;
- if (!vruntime_normalized(p)) {
- /*
- * Fix up our vruntime so that the current sleep doesn't
- * cause 'unlimited' sleep bonus.
- */
- place_entity(cfs_rq, se, 0);
- se->vruntime -= cfs_rq->min_vruntime;
+ /* Start to propagate at parent */
+ se = se->parent;
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+ if (cfs_rq_throttled(cfs_rq))
+ break;
+
+ update_load_avg(se, UPDATE_TG);
}
+}
+#else
+static void propagate_entity_cfs_rq(struct sched_entity *se) { }
+#endif
+
+static void detach_entity_cfs_rq(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
/* Catch up with the cfs_rq and remove our load when we leave */
- update_cfs_rq_load_avg(now, cfs_rq, false);
+ update_load_avg(se, 0);
detach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
+ propagate_entity_cfs_rq(se);
}
-static void attach_task_cfs_rq(struct task_struct *p)
+static void attach_entity_cfs_rq(struct sched_entity *se)
{
- struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 now = cfs_rq_clock_task(cfs_rq);
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
@@ -8722,10 +9029,36 @@ static void attach_task_cfs_rq(struct task_struct *p)
se->depth = se->parent ? se->parent->depth + 1 : 0;
#endif
- /* Synchronize task with its cfs_rq */
- update_cfs_rq_load_avg(now, cfs_rq, false);
+ /* Synchronize entity with its cfs_rq */
+ update_load_avg(se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
attach_entity_load_avg(cfs_rq, se);
update_tg_load_avg(cfs_rq, false);
+ propagate_entity_cfs_rq(se);
+}
+
+static void detach_task_cfs_rq(struct task_struct *p)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ if (!vruntime_normalized(p)) {
+ /*
+ * Fix up our vruntime so that the current sleep doesn't
+ * cause 'unlimited' sleep bonus.
+ */
+ place_entity(cfs_rq, se, 0);
+ se->vruntime -= cfs_rq->min_vruntime;
+ }
+
+ detach_entity_cfs_rq(se);
+}
+
+static void attach_task_cfs_rq(struct task_struct *p)
+{
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+ attach_entity_cfs_rq(se);
if (!vruntime_normalized(p))
se->vruntime += cfs_rq->min_vruntime;
@@ -8779,6 +9112,9 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
#ifdef CONFIG_SMP
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ cfs_rq->propagate_avg = 0;
+#endif
atomic_long_set(&cfs_rq->removed_load_avg, 0);
atomic_long_set(&cfs_rq->removed_util_avg, 0);
#endif
@@ -8887,7 +9223,7 @@ void online_fair_sched_group(struct task_group *tg)
se = tg->se[i];
raw_spin_lock_irq(&rq->lock);
- post_init_entity_util_avg(se);
+ attach_entity_cfs_rq(se);
sync_throttle(tg, i);
raw_spin_unlock_irq(&rq->lock);
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 055f935d4421..7b34c7826ca5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -404,6 +404,7 @@ struct cfs_rq {
unsigned long runnable_load_avg;
#ifdef CONFIG_FAIR_GROUP_SCHED
unsigned long tg_load_avg_contrib;
+ unsigned long propagate_avg;
#endif
atomic_long_t removed_load_avg, removed_util_avg;
#ifndef CONFIG_64BIT
@@ -539,6 +540,11 @@ struct dl_rq {
#ifdef CONFIG_SMP
+static inline bool sched_asym_prefer(int a, int b)
+{
+ return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
+}
+
/*
* We add the notion of a root-domain which will be used to define per-domain
* variables. Each exclusive cpuset essentially defines an island domain by
@@ -623,6 +629,7 @@ struct rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
+ struct list_head *tmp_alone_branch;
#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
@@ -892,7 +899,8 @@ struct sched_group_capacity {
* CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
* for a single CPU.
*/
- unsigned int capacity;
+ unsigned long capacity;
+ unsigned long min_capacity; /* Min per-CPU capacity in group */
unsigned long next_update;
int imbalance; /* XXX unrelated to capacity but shared group state */
@@ -905,6 +913,7 @@ struct sched_group {
unsigned int group_weight;
struct sched_group_capacity *sgc;
+ int asym_prefer_cpu; /* cpu of highest priority in group */
/*
* The CPUs this group covers.
diff --git a/kernel/signal.c b/kernel/signal.c
index 75761acc77cf..29a410780aa9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -427,6 +427,7 @@ void flush_signals(struct task_struct *t)
spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
+#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
sigset_t signal, retain;
@@ -460,6 +461,7 @@ void flush_itimer_signals(void)
__flush_itimer_signals(&tsk->signal->shared_pending);
spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
+#endif
void ignore_signals(struct task_struct *t)
{
@@ -567,6 +569,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
if (!signr) {
signr = __dequeue_signal(&tsk->signal->shared_pending,
mask, info);
+#ifdef CONFIG_POSIX_TIMERS
/*
* itimer signal ?
*
@@ -590,6 +593,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
hrtimer_restart(tmr);
}
}
+#endif
}
recalc_sigpending();
@@ -611,6 +615,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
+#ifdef CONFIG_POSIX_TIMERS
if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
/*
* Release the siglock to ensure proper locking order
@@ -622,6 +627,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
do_schedule_next_timer(info);
spin_lock(&tsk->sighand->siglock);
}
+#endif
return signr;
}
diff --git a/kernel/smp.c b/kernel/smp.c
index bba3b201668d..77fcdb9f2775 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -3,6 +3,9 @@
*
* (C) Jens Axboe <jens.axboe@oracle.com> 2008
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
@@ -543,19 +546,17 @@ void __init setup_nr_cpu_ids(void)
nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
}
-void __weak smp_announce(void)
-{
- printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
-}
-
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
+ int num_nodes, num_cpus;
unsigned int cpu;
idle_threads_init();
cpuhp_threads_init();
+ pr_info("Bringing up secondary CPUs ...\n");
+
/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
if (num_online_cpus() >= setup_max_cpus)
@@ -564,8 +565,13 @@ void __init smp_init(void)
cpu_up(cpu);
}
+ num_nodes = num_online_nodes();
+ num_cpus = num_online_cpus();
+ pr_info("Brought up %d node%s, %d CPU%s\n",
+ num_nodes, (num_nodes > 1 ? "s" : ""),
+ num_cpus, (num_cpus > 1 ? "s" : ""));
+
/* Any cleanup work */
- smp_announce();
smp_cpus_done(setup_max_cpus);
}
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index ec9ab2f01489..1eb82661ecdb 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -194,7 +194,7 @@ static int multi_cpu_stop(void *data)
/* Simple state machine */
do {
/* Chill out and ensure we re-read multi_stop_state. */
- cpu_relax();
+ cpu_relax_yield();
if (msdata->state != curstate) {
curstate = msdata->state;
switch (curstate) {
diff --git a/kernel/sys.c b/kernel/sys.c
index 89d5be418157..78c9fb7dd680 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1416,7 +1416,8 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
* applications, so we live with it
*/
if (!retval && new_rlim && resource == RLIMIT_CPU &&
- new_rlim->rlim_cur != RLIM_INFINITY)
+ new_rlim->rlim_cur != RLIM_INFINITY &&
+ IS_ENABLED(CONFIG_POSIX_TIMERS))
update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
read_unlock(&tasklist_lock);
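
The IS_ENABLED() test above keeps update_rlimit_cpu() visible to the compiler (so it is still type-checked) while letting the optimizer drop the call when CONFIG_POSIX_TIMERS=n. A hedged, generic sketch of the same pattern, with CONFIG_FOO and foo_notify() being hypothetical names:

	static void handle_limit_change(struct task_struct *tsk, u64 new_limit)
	{
		/* IS_ENABLED(CONFIG_FOO) folds to 0 or 1 at compile time */
		if (IS_ENABLED(CONFIG_FOO))
			foo_notify(tsk, new_limit);
	}
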
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 706309f9ed84..39b3368f6de6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -347,13 +347,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
- .procname = "sched_shares_window_ns",
- .data = &sysctl_sched_shares_window,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
#ifdef CONFIG_SCHEDSTATS
{
.procname = "sched_schedstats",
@@ -990,13 +983,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
{
- .procname = "kstack_depth_to_print",
- .data = &kstack_depth_to_print,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
.procname = "io_delay_type",
.data = &io_delay_type,
.maxlen = sizeof(int),
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 49eca0beed32..976840d29a71 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,6 +1,12 @@
-obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o
+obj-y += time.o timer.o hrtimer.o
obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
-obj-y += timeconv.o timecounter.o posix-clock.o alarmtimer.o
+obj-y += timeconv.o timecounter.o alarmtimer.o
+
+ifeq ($(CONFIG_POSIX_TIMERS),y)
+ obj-y += posix-timers.o posix-cpu-timers.o posix-clock.o itimer.o
+else
+ obj-y += posix-stubs.o
+endif
obj-$(CONFIG_GENERIC_CLOCKEVENTS) += clockevents.o tick-common.o
ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 12dd190634ab..9b08ca391aed 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -26,6 +26,9 @@
#include <linux/workqueue.h>
#include <linux/freezer.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/alarmtimer.h>
+
/**
* struct alarm_base - Alarm timer bases
 * @lock: Lock for synchronized access to the base
@@ -40,7 +43,9 @@ static struct alarm_base {
clockid_t base_clockid;
} alarm_bases[ALARM_NUMTYPE];
-/* freezer delta & lock used to handle clock_nanosleep triggered wakeups */
+/* freezer information to handle clock_nanosleep triggered wakeups */
+static enum alarmtimer_type freezer_alarmtype;
+static ktime_t freezer_expires;
static ktime_t freezer_delta;
static DEFINE_SPINLOCK(freezer_delta_lock);
@@ -194,6 +199,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
}
spin_unlock_irqrestore(&base->lock, flags);
+ trace_alarmtimer_fired(alarm, base->gettime());
return ret;
}
@@ -218,15 +224,16 @@ EXPORT_SYMBOL_GPL(alarm_expires_remaining);
*/
static int alarmtimer_suspend(struct device *dev)
{
- struct rtc_time tm;
- ktime_t min, now;
- unsigned long flags;
+ ktime_t min, now, expires;
+ int i, ret, type;
struct rtc_device *rtc;
- int i;
- int ret;
+ unsigned long flags;
+ struct rtc_time tm;
spin_lock_irqsave(&freezer_delta_lock, flags);
min = freezer_delta;
+ expires = freezer_expires;
+ type = freezer_alarmtype;
freezer_delta = ktime_set(0, 0);
spin_unlock_irqrestore(&freezer_delta_lock, flags);
@@ -247,8 +254,11 @@ static int alarmtimer_suspend(struct device *dev)
if (!next)
continue;
delta = ktime_sub(next->expires, base->gettime());
- if (!min.tv64 || (delta.tv64 < min.tv64))
+ if (!min.tv64 || (delta.tv64 < min.tv64)) {
+ expires = next->expires;
min = delta;
+ type = i;
+ }
}
if (min.tv64 == 0)
return 0;
@@ -258,6 +268,8 @@ static int alarmtimer_suspend(struct device *dev)
return -EBUSY;
}
+ trace_alarmtimer_suspend(expires, type);
+
/* Setup an rtc timer to fire that far in the future */
rtc_timer_cancel(rtc, &rtctimer);
rtc_read_time(rtc, &tm);
@@ -295,15 +307,32 @@ static int alarmtimer_resume(struct device *dev)
static void alarmtimer_freezerset(ktime_t absexp, enum alarmtimer_type type)
{
- ktime_t delta;
+ struct alarm_base *base;
unsigned long flags;
- struct alarm_base *base = &alarm_bases[type];
+ ktime_t delta;
+
+ switch(type) {
+ case ALARM_REALTIME:
+ base = &alarm_bases[ALARM_REALTIME];
+ type = ALARM_REALTIME_FREEZER;
+ break;
+ case ALARM_BOOTTIME:
+ base = &alarm_bases[ALARM_BOOTTIME];
+ type = ALARM_BOOTTIME_FREEZER;
+ break;
+ default:
+ WARN_ONCE(1, "Invalid alarm type: %d\n", type);
+ return;
+ }
delta = ktime_sub(absexp, base->gettime());
spin_lock_irqsave(&freezer_delta_lock, flags);
- if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64))
+ if (!freezer_delta.tv64 || (delta.tv64 < freezer_delta.tv64)) {
freezer_delta = delta;
+ freezer_expires = absexp;
+ freezer_alarmtype = type;
+ }
spin_unlock_irqrestore(&freezer_delta_lock, flags);
}
@@ -342,6 +371,8 @@ void alarm_start(struct alarm *alarm, ktime_t start)
alarmtimer_enqueue(base, alarm);
hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);
spin_unlock_irqrestore(&base->lock, flags);
+
+ trace_alarmtimer_start(alarm, base->gettime());
}
EXPORT_SYMBOL_GPL(alarm_start);
@@ -390,6 +421,8 @@ int alarm_try_to_cancel(struct alarm *alarm)
if (ret >= 0)
alarmtimer_dequeue(base, alarm);
spin_unlock_irqrestore(&base->lock, flags);
+
+ trace_alarmtimer_cancel(alarm, base->gettime());
return ret;
}
EXPORT_SYMBOL_GPL(alarm_try_to_cancel);
@@ -846,8 +879,10 @@ static int __init alarmtimer_init(void)
alarmtimer_rtc_timer_init();
- posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
- posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS)) {
+ posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
+ posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
+ }
/* Initialize alarm bases */
alarm_bases[ALARM_REALTIME].base_clockid = CLOCK_REALTIME;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index bb5ec425dfe0..08be5c99d26b 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1742,15 +1742,19 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
* You can set the task state as follows -
*
* %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns.
+ * pass before the routine returns unless the current task is explicitly
+ * woken up (e.g. by wake_up_process()).
*
* %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task.
+ * delivered to the current task or the current task is explicitly woken
+ * up.
*
* The current task state is guaranteed to be TASK_RUNNING when this
* routine returns.
*
- * Returns 0 when the timer has expired otherwise -EINTR
+ * Returns 0 when the timer has expired. If the task was woken before the
+ * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
+ * by an explicit wakeup, it returns -EINTR.
*/
int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
const enum hrtimer_mode mode)
@@ -1772,15 +1776,19 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
* You can set the task state as follows -
*
* %TASK_UNINTERRUPTIBLE - at least @timeout time is guaranteed to
- * pass before the routine returns.
+ * pass before the routine returns unless the current task is explicitly
+ * woken up (e.g. by wake_up_process()).
*
* %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task.
+ * delivered to the current task or the current task is explicitly woken
+ * up.
*
* The current task state is guaranteed to be TASK_RUNNING when this
* routine returns.
*
- * Returns 0 when the timer has expired otherwise -EINTR
+ * Returns 0 when the timer has expired. If the task was woken before the
+ * timer expired by a signal (only possible in state TASK_INTERRUPTIBLE) or
+ * by an explicit wakeup, it returns -EINTR.
*/
int __sched schedule_hrtimeout(ktime_t *expires,
const enum hrtimer_mode mode)
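
A short usage sketch matching the documented semantics above: sleep interruptibly with some slack and distinguish timer expiry (0) from an early wakeup (-EINTR). Illustrative only; wait_a_bit() is a made-up caller.

	static int wait_a_bit(void)
	{
		ktime_t t = ktime_set(0, 10 * NSEC_PER_MSEC);
		int ret;

		set_current_state(TASK_INTERRUPTIBLE);
		/* relative expiry of 10ms, with 1ms of slack */
		ret = schedule_hrtimeout_range(&t, NSEC_PER_MSEC, HRTIMER_MODE_REL);
		if (ret == -EINTR)
			return -EINTR;	/* signal or explicit wakeup */
		return 0;		/* timer expired */
	}
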
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 1d5c7204ddc9..2b9f45bc955d 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -238,6 +238,8 @@ again:
return 0;
}
+#ifdef __ARCH_WANT_SYS_ALARM
+
/**
* alarm_setitimer - set alarm in seconds
*
@@ -250,7 +252,7 @@ again:
* On 32 bit machines the seconds value is limited to (INT_MAX/2) to avoid
* negative timeval settings which would cause immediate expiry.
*/
-unsigned int alarm_setitimer(unsigned int seconds)
+static unsigned int alarm_setitimer(unsigned int seconds)
{
struct itimerval it_new, it_old;
@@ -275,6 +277,17 @@ unsigned int alarm_setitimer(unsigned int seconds)
return it_old.it_value.tv_sec;
}
+/*
+ * For backwards compatibility? This can be done in libc so Alpha
+ * and all newer ports shouldn't need it.
+ */
+SYSCALL_DEFINE1(alarm, unsigned int, seconds)
+{
+ return alarm_setitimer(seconds);
+}
+
+#endif
+
SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
struct itimerval __user *, ovalue)
{
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 39008d78927a..f246763c9947 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -9,7 +9,6 @@
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
-#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
@@ -133,9 +132,9 @@ static inline unsigned long long prof_ticks(struct task_struct *p)
}
static inline unsigned long long virt_ticks(struct task_struct *p)
{
- cputime_t utime;
+ cputime_t utime, stime;
- task_cputime(p, &utime, NULL);
+ task_cputime(p, &utime, &stime);
return cputime_to_expires(utime);
}
@@ -447,10 +446,7 @@ static void cleanup_timers(struct list_head *head)
*/
void posix_cpu_timers_exit(struct task_struct *tsk)
{
- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
- sizeof(unsigned long long));
cleanup_timers(tsk->cpu_timers);
-
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
new file mode 100644
index 000000000000..cd6716e115e8
--- /dev/null
+++ b/kernel/time/posix-stubs.c
@@ -0,0 +1,123 @@
+/*
+ * Dummy stubs used when CONFIG_POSIX_TIMERS=n
+ *
+ * Created by: Nicolas Pitre, July 2016
+ * Copyright: (C) 2016 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/syscalls.h>
+#include <linux/ktime.h>
+#include <linux/timekeeping.h>
+#include <linux/posix-timers.h>
+
+asmlinkage long sys_ni_posix_timers(void)
+{
+ pr_err_once("process %d (%s) attempted a POSIX timer syscall "
+ "while CONFIG_POSIX_TIMERS is not set\n",
+ current->pid, current->comm);
+ return -ENOSYS;
+}
+
+#define SYS_NI(name) SYSCALL_ALIAS(sys_##name, sys_ni_posix_timers)
+
+SYS_NI(timer_create);
+SYS_NI(timer_gettime);
+SYS_NI(timer_getoverrun);
+SYS_NI(timer_settime);
+SYS_NI(timer_delete);
+SYS_NI(clock_adjtime);
+SYS_NI(getitimer);
+SYS_NI(setitimer);
+#ifdef __ARCH_WANT_SYS_ALARM
+SYS_NI(alarm);
+#endif
+
+/*
+ * We preserve minimal support for CLOCK_REALTIME and CLOCK_MONOTONIC
+ * as it is easy to remain compatible with little code. CLOCK_BOOTTIME
+ * is also included for convenience as at least systemd uses it.
+ */
+
+SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+ const struct timespec __user *, tp)
+{
+ struct timespec new_tp;
+
+ if (which_clock != CLOCK_REALTIME)
+ return -EINVAL;
+ if (copy_from_user(&new_tp, tp, sizeof (*tp)))
+ return -EFAULT;
+ return do_sys_settimeofday(&new_tp, NULL);
+}
+
+SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+ struct timespec __user *,tp)
+{
+ struct timespec kernel_tp;
+
+ switch (which_clock) {
+ case CLOCK_REALTIME: ktime_get_real_ts(&kernel_tp); break;
+ case CLOCK_MONOTONIC: ktime_get_ts(&kernel_tp); break;
+ case CLOCK_BOOTTIME: get_monotonic_boottime(&kernel_tp); break;
+ default: return -EINVAL;
+ }
+ if (copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
+ return -EFAULT;
+ return 0;
+}
+
+SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock, struct timespec __user *, tp)
+{
+ struct timespec rtn_tp = {
+ .tv_sec = 0,
+ .tv_nsec = hrtimer_resolution,
+ };
+
+ switch (which_clock) {
+ case CLOCK_REALTIME:
+ case CLOCK_MONOTONIC:
+ case CLOCK_BOOTTIME:
+ if (copy_to_user(tp, &rtn_tp, sizeof(rtn_tp)))
+ return -EFAULT;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+ const struct timespec __user *, rqtp,
+ struct timespec __user *, rmtp)
+{
+ struct timespec t;
+
+ switch (which_clock) {
+ case CLOCK_REALTIME:
+ case CLOCK_MONOTONIC:
+ case CLOCK_BOOTTIME:
+ if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
+ return -EFAULT;
+ if (!timespec_valid(&t))
+ return -EINVAL;
+ return hrtimer_nanosleep(&t, rmtp, flags & TIMER_ABSTIME ?
+ HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
+ which_clock);
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+long clock_nanosleep_restart(struct restart_block *restart_block)
+{
+ return hrtimer_nanosleep_restart(restart_block);
+}
+#endif
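
From userspace the stubbed syscalls behave like any other unimplemented syscall. A minimal, hypothetical test program would see ENOSYS on a kernel built with CONFIG_POSIX_TIMERS=n; link with -lrt where the C library requires it:

	#include <errno.h>
	#include <signal.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		timer_t id;

		if (timer_create(CLOCK_MONOTONIC, NULL, &id) < 0 && errno == ENOSYS)
			printf("POSIX timers not available in this kernel\n");
		return 0;
	}
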
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3bcb61b52f6c..71496a20e670 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -390,24 +390,16 @@ static int __init tick_nohz_full_setup(char *str)
}
__setup("nohz_full=", tick_nohz_full_setup);
-static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int tick_nohz_cpu_down(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DOWN_PREPARE:
- /*
- * The boot CPU handles housekeeping duty (unbound timers,
- * workqueues, timekeeping, ...) on behalf of full dynticks
- * CPUs. It must remain online when nohz full is enabled.
- */
- if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
- return NOTIFY_BAD;
- break;
- }
- return NOTIFY_OK;
+ /*
+ * The boot CPU handles housekeeping duty (unbound timers,
+ * workqueues, timekeeping, ...) on behalf of full dynticks
+ * CPUs. It must remain online when nohz full is enabled.
+ */
+ if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
+ return -EBUSY;
+ return 0;
}
static int tick_nohz_init_all(void)
@@ -428,7 +420,7 @@ static int tick_nohz_init_all(void)
void __init tick_nohz_init(void)
{
- int cpu;
+ int cpu, ret;
if (!tick_nohz_full_running) {
if (tick_nohz_init_all() < 0)
@@ -469,7 +461,10 @@ void __init tick_nohz_init(void)
for_each_cpu(cpu, tick_nohz_full_mask)
context_tracking_cpu_set(cpu);
- cpu_notifier(tick_nohz_cpu_down_callback, 0);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "kernel/nohz:predown", NULL,
+ tick_nohz_cpu_down);
+ WARN_ON(ret < 0);
pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
cpumask_pr_args(tick_nohz_full_mask));
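
The conversion above is the pattern repeated throughout this merge: a cpu_notifier() callback becomes a cpuhp state with explicit online/teardown hooks. A generic, hedged sketch (my_cpu_online(), my_cpu_down_prep() and the state name are hypothetical):

	static int my_cpu_online(unsigned int cpu)
	{
		return 0;	/* non-zero aborts bringing this CPU up */
	}

	static int my_cpu_down_prep(unsigned int cpu)
	{
		return 0;	/* non-zero (e.g. -EBUSY) vetoes the offline */
	}

	static int __init my_subsys_init(void)
	{
		int ret;

		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
						"subsys/example:online",
						my_cpu_online, my_cpu_down_prep);
		return ret < 0 ? ret : 0;
	}
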
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 37dec7e3db43..b2286e94c934 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -425,6 +425,35 @@ u64 ktime_get_raw_fast_ns(void)
}
EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
+/**
+ * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
+ *
+ * To keep it NMI safe since we're accessing from tracing, we're not using a
+ * separate timekeeper with updates to monotonic clock and boot offset
+ * protected with seqlocks. This has the following minor side effects:
+ *
+ * (1) It's possible that a timestamp is taken after the boot offset is updated
+ * but before the timekeeper is updated. If this happens, the new boot offset
+ * is added to the old timekeeping, making the clock appear to update slightly
+ * earlier:
+ * CPU 0 CPU 1
+ * timekeeping_inject_sleeptime64()
+ * __timekeeping_inject_sleeptime(tk, delta);
+ * timestamp();
+ * timekeeping_update(tk, TK_CLEAR_NTP...);
+ *
+ * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
+ * partially updated. Since the tk->offs_boot update is a rare event, this
+ * should be a rare occurrence which postprocessing should be able to handle.
+ */
+u64 notrace ktime_get_boot_fast_ns(void)
+{
+ struct timekeeper *tk = &tk_core.timekeeper;
+
+ return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
+}
+EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
+
/* Suspend-time cycles value for halted fast timekeeper. */
static cycle_t cycles_at_suspend;
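
A hedged usage sketch for the new helper: taking an NMI-safe, boot-clock timestamp for an in-kernel event record (struct my_event and my_record_event() are hypothetical):

	struct my_event {
		u64	ts_boot_ns;
	};

	static void my_record_event(struct my_event *ev)
	{
		/* safe from NMI/tracing context, per the comment above */
		ev->ts_boot_ns = ktime_get_boot_fast_ns();
	}
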
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index c611c47de884..ea4fbf8477a9 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1615,7 +1615,8 @@ void update_process_times(int user_tick)
irq_work_tick();
#endif
scheduler_tick();
- run_posix_cpu_timers(p);
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ run_posix_cpu_timers(p);
}
/**
@@ -1676,19 +1677,6 @@ void run_local_timers(void)
raise_softirq(TIMER_SOFTIRQ);
}
-#ifdef __ARCH_WANT_SYS_ALARM
-
-/*
- * For backwards compatibility? This can be done in libc so Alpha
- * and all newer ports shouldn't need it.
- */
-SYSCALL_DEFINE1(alarm, unsigned int, seconds)
-{
- return alarm_setitimer(seconds);
-}
-
-#endif
-
static void process_timeout(unsigned long __data)
{
wake_up_process((struct task_struct *)__data);
@@ -1705,11 +1693,12 @@ static void process_timeout(unsigned long __data)
* You can set the task state as follows -
*
* %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
- * pass before the routine returns. The routine will return 0
+ * pass before the routine returns unless the current task is explicitly
+ * woken up (e.g. by wake_up_process()).
*
* %TASK_INTERRUPTIBLE - the routine may return early if a signal is
- * delivered to the current task. In this case the remaining time
- * in jiffies will be returned, or 0 if the timer expired in time
+ * delivered to the current task or the current task is explicitly woken
+ * up.
*
* The current task state is guaranteed to be TASK_RUNNING when this
* routine returns.
@@ -1718,7 +1707,9 @@ static void process_timeout(unsigned long __data)
* the CPU away without a bound on the timeout. In this case the return
* value will be %MAX_SCHEDULE_TIMEOUT.
*
- * In all cases the return value is guaranteed to be non-negative.
+ * Returns 0 when the timer has expired, otherwise the remaining time in
+ * jiffies will be returned. In all cases the return value is guaranteed
+ * to be non-negative.
*/
signed long __sched schedule_timeout(signed long timeout)
{
@@ -1910,16 +1901,6 @@ unsigned long msleep_interruptible(unsigned int msecs)
EXPORT_SYMBOL(msleep_interruptible);
-static void __sched do_usleep_range(unsigned long min, unsigned long max)
-{
- ktime_t kmin;
- u64 delta;
-
- kmin = ktime_set(0, min * NSEC_PER_USEC);
- delta = (u64)(max - min) * NSEC_PER_USEC;
- schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
-}
-
/**
* usleep_range - Sleep for an approximate time
* @min: Minimum time in usecs to sleep
@@ -1933,7 +1914,14 @@ static void __sched do_usleep_range(unsigned long min, unsigned long max)
*/
void __sched usleep_range(unsigned long min, unsigned long max)
{
- __set_current_state(TASK_UNINTERRUPTIBLE);
- do_usleep_range(min, max);
+ ktime_t exp = ktime_add_us(ktime_get(), min);
+ u64 delta = (u64)(max - min) * NSEC_PER_USEC;
+
+ for (;;) {
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ /* Do not return before the requested sleep time has elapsed */
+ if (!schedule_hrtimeout_range(&exp, delta, HRTIMER_MODE_ABS))
+ break;
+ }
}
EXPORT_SYMBOL(usleep_range);
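
Design note: the expiry is now absolute, so when an explicit wakeup interrupts the sleep the loop re-arms against the same deadline and the @min guarantee still holds. Caller usage is unchanged; a trivial, hypothetical example:

	/* give the device 100-200us to settle; wakeups may be coalesced
	 * anywhere inside that window */
	usleep_range(100, 200);
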
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 9c143739b8d7..89a2611a1635 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -479,9 +479,7 @@ struct ring_buffer {
struct ring_buffer_per_cpu **buffers;
-#ifdef CONFIG_HOTPLUG_CPU
- struct notifier_block cpu_notify;
-#endif
+ struct hlist_node node;
u64 (*clock)(void);
struct rb_irq_work irq_work;
@@ -1274,11 +1272,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
kfree(cpu_buffer);
}
-#ifdef CONFIG_HOTPLUG_CPU
-static int rb_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu);
-#endif
-
/**
* __ring_buffer_alloc - allocate a new ring_buffer
* @size: the size in bytes per cpu that is needed.
@@ -1296,6 +1289,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
long nr_pages;
int bsize;
int cpu;
+ int ret;
/* keep it in its own cache line */
buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1303,7 +1297,7 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (!buffer)
return NULL;
- if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
goto fail_free_buffer;
nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
@@ -1318,17 +1312,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (nr_pages < 2)
nr_pages = 2;
- /*
- * In case of non-hotplug cpu, if the ring-buffer is allocated
- * in early initcall, it will not be notified of secondary cpus.
- * In that off case, we need to allocate for all possible cpus.
- */
-#ifdef CONFIG_HOTPLUG_CPU
- cpu_notifier_register_begin();
- cpumask_copy(buffer->cpumask, cpu_online_mask);
-#else
- cpumask_copy(buffer->cpumask, cpu_possible_mask);
-#endif
buffer->cpus = nr_cpu_ids;
bsize = sizeof(void *) * nr_cpu_ids;
@@ -1337,19 +1320,15 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
if (!buffer->buffers)
goto fail_free_cpumask;
- for_each_buffer_cpu(buffer, cpu) {
- buffer->buffers[cpu] =
- rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
- if (!buffer->buffers[cpu])
- goto fail_free_buffers;
- }
+ cpu = raw_smp_processor_id();
+ cpumask_set_cpu(cpu, buffer->cpumask);
+ buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
+ if (!buffer->buffers[cpu])
+ goto fail_free_buffers;
-#ifdef CONFIG_HOTPLUG_CPU
- buffer->cpu_notify.notifier_call = rb_cpu_notify;
- buffer->cpu_notify.priority = 0;
- __register_cpu_notifier(&buffer->cpu_notify);
- cpu_notifier_register_done();
-#endif
+ ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
+ if (ret < 0)
+ goto fail_free_buffers;
mutex_init(&buffer->mutex);
@@ -1364,9 +1343,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
fail_free_cpumask:
free_cpumask_var(buffer->cpumask);
-#ifdef CONFIG_HOTPLUG_CPU
- cpu_notifier_register_done();
-#endif
fail_free_buffer:
kfree(buffer);
@@ -1383,18 +1359,11 @@ ring_buffer_free(struct ring_buffer *buffer)
{
int cpu;
-#ifdef CONFIG_HOTPLUG_CPU
- cpu_notifier_register_begin();
- __unregister_cpu_notifier(&buffer->cpu_notify);
-#endif
+ cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
for_each_buffer_cpu(buffer, cpu)
rb_free_cpu_buffer(buffer->buffers[cpu]);
-#ifdef CONFIG_HOTPLUG_CPU
- cpu_notifier_register_done();
-#endif
-
kfree(buffer->buffers);
free_cpumask_var(buffer->cpumask);
@@ -4633,62 +4602,48 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
-#ifdef CONFIG_HOTPLUG_CPU
-static int rb_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+/*
+ * We only allocate new buffers, never free them if the CPU goes down.
+ * If we were to free the buffer, then the user would lose any trace that was in
+ * the buffer.
+ */
+int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
- struct ring_buffer *buffer =
- container_of(self, struct ring_buffer, cpu_notify);
- long cpu = (long)hcpu;
+ struct ring_buffer *buffer;
long nr_pages_same;
int cpu_i;
unsigned long nr_pages;
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- if (cpumask_test_cpu(cpu, buffer->cpumask))
- return NOTIFY_OK;
-
- nr_pages = 0;
- nr_pages_same = 1;
- /* check if all cpu sizes are same */
- for_each_buffer_cpu(buffer, cpu_i) {
- /* fill in the size from first enabled cpu */
- if (nr_pages == 0)
- nr_pages = buffer->buffers[cpu_i]->nr_pages;
- if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
- nr_pages_same = 0;
- break;
- }
- }
- /* allocate minimum pages, user can later expand it */
- if (!nr_pages_same)
- nr_pages = 2;
- buffer->buffers[cpu] =
- rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
- if (!buffer->buffers[cpu]) {
- WARN(1, "failed to allocate ring buffer on CPU %ld\n",
- cpu);
- return NOTIFY_OK;
+ buffer = container_of(node, struct ring_buffer, node);
+ if (cpumask_test_cpu(cpu, buffer->cpumask))
+ return 0;
+
+ nr_pages = 0;
+ nr_pages_same = 1;
+ /* check if all cpu sizes are same */
+ for_each_buffer_cpu(buffer, cpu_i) {
+ /* fill in the size from first enabled cpu */
+ if (nr_pages == 0)
+ nr_pages = buffer->buffers[cpu_i]->nr_pages;
+ if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
+ nr_pages_same = 0;
+ break;
}
- smp_wmb();
- cpumask_set_cpu(cpu, buffer->cpumask);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- /*
- * Do nothing.
- * If we were to free the buffer, then the user would
- * lose any trace that was in the buffer.
- */
- break;
- default:
- break;
}
- return NOTIFY_OK;
+ /* allocate minimum pages, user can later expand it */
+ if (!nr_pages_same)
+ nr_pages = 2;
+ buffer->buffers[cpu] =
+ rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
+ if (!buffer->buffers[cpu]) {
+ WARN(1, "failed to allocate ring buffer on CPU %u\n",
+ cpu);
+ return -ENOMEM;
+ }
+ smp_wmb();
+ cpumask_set_cpu(cpu, buffer->cpumask);
+ return 0;
}
-#endif
#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
/*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8696ce6bf2f6..54d5270a5042 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1125,6 +1125,7 @@ static struct {
{ trace_clock, "perf", 1 },
{ ktime_get_mono_fast_ns, "mono", 1 },
{ ktime_get_raw_fast_ns, "mono_raw", 1 },
+ { ktime_get_boot_fast_ns, "boot", 1 },
ARCH_TRACE_CLOCKS
};
@@ -7659,10 +7660,21 @@ __init static int tracer_alloc_buffers(void)
raw_spin_lock_init(&global_trace.start_lock);
+ /*
+ * The prepare callback allocates some memory for the ring buffer. We
+ * don't free the buffer if the CPU goes down. If we were to free
+ * the buffer, then the user would lose any trace that was in the
+ * buffer. The memory will be removed once the "instance" is removed.
+ */
+ ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
+ "trace/RB:preapre", trace_rb_cpu_prepare,
+ NULL);
+ if (ret < 0)
+ goto out_free_cpumask;
/* Used for event triggers */
temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
if (!temp_buffer)
- goto out_free_cpumask;
+ goto out_rm_hp_state;
if (trace_create_savedcmd() < 0)
goto out_free_temp_buffer;
@@ -7723,6 +7735,8 @@ out_free_savedcmd:
free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
ring_buffer_free(temp_buffer);
+out_rm_hp_state:
+ cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
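
CPUHP_TRACE_RB_PREPARE is a multi-instance state: the callbacks are registered once with cpuhp_setup_state_multi(), and each ring buffer then attaches its own hlist_node with cpuhp_state_add_instance(). A generic, hedged sketch of that pairing (struct my_obj, my_prepare() and CPUHP_MY_PREPARE are hypothetical; a real user adds a fixed entry to enum cpuhp_state, as this patch does):

	struct my_obj {
		struct hlist_node node;
		/* per-object data */
	};

	static int my_prepare(unsigned int cpu, struct hlist_node *node)
	{
		struct my_obj *obj = hlist_entry(node, struct my_obj, node);

		/* allocate per-cpu resources of obj for @cpu here */
		return 0;
	}

	static int __init my_hp_init(void)
	{
		return cpuhp_setup_state_multi(CPUHP_MY_PREPARE, "my:prepare",
					       my_prepare, NULL);
	}

	static int my_obj_register(struct my_obj *obj)
	{
		return cpuhp_state_add_instance(CPUHP_MY_PREPARE, &obj->node);
	}
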
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ee26429b3db0..2ea39b799611 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1218,7 +1218,7 @@ config DEBUG_BUGVERBOSE
config DEBUG_LIST
bool "Debug linked list manipulation"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL || BUG_ON_DATA_CORRUPTION
help
Enable this to turn on extended checks in the linked-list
walking routines.
@@ -1434,7 +1434,8 @@ config RCU_TRACE
select TRACE_CLOCK
help
This option provides tracing in RCU which presents stats
- in debugfs for debugging RCU implementation.
+ in debugfs for debugging RCU implementation. It also enables
+ additional tracepoints for ftrace-style event tracing.
Say Y here if you want to enable RCU tracing
Say N if you are unsure.
@@ -1964,6 +1965,16 @@ config TEST_STATIC_KEYS
If unsure, say N.
+config BUG_ON_DATA_CORRUPTION
+ bool "Trigger a BUG when data corruption is detected"
+ select DEBUG_LIST
+ help
+ Select this option if the kernel should BUG when it encounters
+ data corruption in kernel memory structures when they get checked
+ for validity.
+
+ If unsure, say N.
+
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3859bf63561c..7f7bfa55eb6d 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -2,8 +2,7 @@
* Copyright 2006, Red Hat, Inc., Dave Jones
* Released under the General Public License (GPL).
*
- * This file contains the linked list implementations for
- * DEBUG_LIST.
+ * This file contains the linked list validation for DEBUG_LIST.
*/
#include <linux/export.h>
@@ -13,88 +12,48 @@
#include <linux/rculist.h>
/*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
+ * Check that the data structures for the list manipulations are reasonably
+ * valid. Failures here indicate memory corruption (and possibly an exploit
+ * attempt).
*/
-void __list_add(struct list_head *new,
- struct list_head *prev,
- struct list_head *next)
+bool __list_add_valid(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
{
- WARN(next->prev != prev,
- "list_add corruption. next->prev should be "
- "prev (%p), but was %p. (next=%p).\n",
+ CHECK_DATA_CORRUPTION(next->prev != prev,
+ "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
prev, next->prev, next);
- WARN(prev->next != next,
- "list_add corruption. prev->next should be "
- "next (%p), but was %p. (prev=%p).\n",
+ CHECK_DATA_CORRUPTION(prev->next != next,
+ "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
next, prev->next, prev);
- WARN(new == prev || new == next,
- "list_add double add: new=%p, prev=%p, next=%p.\n",
- new, prev, next);
- next->prev = new;
- new->next = next;
- new->prev = prev;
- WRITE_ONCE(prev->next, new);
+ CHECK_DATA_CORRUPTION(new == prev || new == next,
+ "list_add double add: new=%p, prev=%p, next=%p.\n",
+ new, prev, next);
+
+ return true;
}
-EXPORT_SYMBOL(__list_add);
+EXPORT_SYMBOL(__list_add_valid);
-void __list_del_entry(struct list_head *entry)
+bool __list_del_entry_valid(struct list_head *entry)
{
struct list_head *prev, *next;
prev = entry->prev;
next = entry->next;
- if (WARN(next == LIST_POISON1,
+ CHECK_DATA_CORRUPTION(next == LIST_POISON1,
"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
- entry, LIST_POISON1) ||
- WARN(prev == LIST_POISON2,
+ entry, LIST_POISON1);
+ CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
- entry, LIST_POISON2) ||
- WARN(prev->next != entry,
- "list_del corruption. prev->next should be %p, "
- "but was %p\n", entry, prev->next) ||
- WARN(next->prev != entry,
- "list_del corruption. next->prev should be %p, "
- "but was %p\n", entry, next->prev))
- return;
-
- __list_del(prev, next);
-}
-EXPORT_SYMBOL(__list_del_entry);
+ entry, LIST_POISON2);
+ CHECK_DATA_CORRUPTION(prev->next != entry,
+ "list_del corruption. prev->next should be %p, but was %p\n",
+ entry, prev->next);
+ CHECK_DATA_CORRUPTION(next->prev != entry,
+ "list_del corruption. next->prev should be %p, but was %p\n",
+ entry, next->prev);
+ return true;
-/**
- * list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: list_empty on entry does not return true after this, the entry is
- * in an undefined state.
- */
-void list_del(struct list_head *entry)
-{
- __list_del_entry(entry);
- entry->next = LIST_POISON1;
- entry->prev = LIST_POISON2;
-}
-EXPORT_SYMBOL(list_del);
-
-/*
- * RCU variants.
- */
-void __list_add_rcu(struct list_head *new,
- struct list_head *prev, struct list_head *next)
-{
- WARN(next->prev != prev,
- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
- prev, next->prev, next);
- WARN(prev->next != next,
- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
- next, prev->next, prev);
- new->next = next;
- new->prev = prev;
- rcu_assign_pointer(list_next_rcu(prev), new);
- next->prev = new;
}
-EXPORT_SYMBOL(__list_add_rcu);
+EXPORT_SYMBOL(__list_del_entry_valid);
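
The validation helpers above funnel every failure through CHECK_DATA_CORRUPTION(), whose behaviour depends on the new CONFIG_BUG_ON_DATA_CORRUPTION option. One plausible shape for such a macro, given as an assumption rather than the exact definition added elsewhere in this series:

	#define CHECK_DATA_CORRUPTION(condition, fmt, ...)			\
		do {								\
			if (unlikely(condition)) {				\
				if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) {\
					pr_err(fmt, ##__VA_ARGS__);		\
					BUG();					\
				} else {					\
					WARN(1, fmt, ##__VA_ARGS__);		\
				}						\
			}							\
		} while (0)
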
diff --git a/lib/lockref.c b/lib/lockref.c
index 5a92189ad711..c4bfcb8836cd 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -20,7 +20,7 @@
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
- cpu_relax_lowlatency(); \
+ cpu_relax(); \
} \
} while (0)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 72d36113ccaa..c8cebb137076 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -158,25 +158,21 @@ EXPORT_SYMBOL(percpu_counter_destroy);
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
-static void compute_batch_value(void)
+static int compute_batch_value(unsigned int cpu)
{
int nr = num_online_cpus();
percpu_counter_batch = max(32, nr*2);
+ return 0;
}
-static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
- unsigned long action, void *hcpu)
+static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
- unsigned int cpu;
struct percpu_counter *fbc;
- compute_batch_value();
- if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
- return NOTIFY_OK;
+ compute_batch_value(cpu);
- cpu = (unsigned long)hcpu;
spin_lock_irq(&percpu_counters_lock);
list_for_each_entry(fbc, &percpu_counters, list) {
s32 *pcount;
@@ -190,7 +186,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
}
spin_unlock_irq(&percpu_counters_lock);
#endif
- return NOTIFY_OK;
+ return 0;
}
/*
@@ -222,8 +218,15 @@ EXPORT_SYMBOL(__percpu_counter_compare);
static int __init percpu_counter_startup(void)
{
- compute_batch_value();
- hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
+ compute_batch_value, NULL);
+ WARN_ON(ret < 0);
+ ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
+ "lib/percpu_cnt:dead", NULL,
+ percpu_counter_cpu_dead);
+ WARN_ON(ret < 0);
return 0;
}
module_init(percpu_counter_startup);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0d1d23ea7925..270bf106743d 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1641,32 +1641,31 @@ static __init void radix_tree_init_maxnodes(void)
}
}
-static int radix_tree_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int radix_tree_cpu_dead(unsigned int cpu)
{
- int cpu = (long)hcpu;
struct radix_tree_preload *rtp;
struct radix_tree_node *node;
/* Free per-cpu pool of preloaded nodes */
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- rtp = &per_cpu(radix_tree_preloads, cpu);
- while (rtp->nr) {
- node = rtp->nodes;
- rtp->nodes = node->private_data;
- kmem_cache_free(radix_tree_node_cachep, node);
- rtp->nr--;
- }
+ rtp = &per_cpu(radix_tree_preloads, cpu);
+ while (rtp->nr) {
+ node = rtp->nodes;
+ rtp->nodes = node->private_data;
+ kmem_cache_free(radix_tree_node_cachep, node);
+ rtp->nr--;
}
- return NOTIFY_OK;
+ return 0;
}
void __init radix_tree_init(void)
{
+ int ret;
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
radix_tree_node_ctor);
radix_tree_init_maxnodes();
- hotcpu_notifier(radix_tree_callback, 0);
+ ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
+ NULL, radix_tree_cpu_dead);
+ WARN_ON(ret < 0);
}
diff --git a/mm/compaction.c b/mm/compaction.c
index 0409a4ad6ea1..0d37192d9423 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2043,33 +2043,38 @@ void kcompactd_stop(int nid)
* away, we get changed to run anywhere: as the first one comes back,
* restore their cpu bindings.
*/
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int kcompactd_cpu_online(unsigned int cpu)
{
int nid;
- if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
- for_each_node_state(nid, N_MEMORY) {
- pg_data_t *pgdat = NODE_DATA(nid);
- const struct cpumask *mask;
+ for_each_node_state(nid, N_MEMORY) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+ const struct cpumask *mask;
- mask = cpumask_of_node(pgdat->node_id);
+ mask = cpumask_of_node(pgdat->node_id);
- if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
- /* One of our CPUs online: restore mask */
- set_cpus_allowed_ptr(pgdat->kcompactd, mask);
- }
+ if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
+ /* One of our CPUs online: restore mask */
+ set_cpus_allowed_ptr(pgdat->kcompactd, mask);
}
- return NOTIFY_OK;
+ return 0;
}
static int __init kcompactd_init(void)
{
int nid;
+ int ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "mm/compaction:online",
+ kcompactd_cpu_online, NULL);
+ if (ret < 0) {
+ pr_err("kcompactd: failed to register hotplug callbacks.\n");
+ return ret;
+ }
for_each_node_state(nid, N_MEMORY)
kcompactd_run(nid);
- hotcpu_notifier(cpu_callback, 0);
return 0;
}
subsys_initcall(kcompactd_init)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d4a6e4001512..f8e35cc66d32 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -737,8 +737,9 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
- if (track_pfn_insert(vma, &pgprot, pfn))
- return VM_FAULT_SIGBUS;
+
+ track_pfn_insert(vma, &pgprot, pfn);
+
insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
return VM_FAULT_NOPAGE;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f870ba43942..6c2043509fb5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1816,22 +1816,13 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
mutex_unlock(&percpu_charge_mutex);
}
-static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
- unsigned long action,
- void *hcpu)
+static int memcg_hotplug_cpu_dead(unsigned int cpu)
{
- int cpu = (unsigned long)hcpu;
struct memcg_stock_pcp *stock;
- if (action == CPU_ONLINE)
- return NOTIFY_OK;
-
- if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
- return NOTIFY_OK;
-
stock = &per_cpu(memcg_stock, cpu);
drain_stock(stock);
- return NOTIFY_OK;
+ return 0;
}
static void reclaim_high(struct mem_cgroup *memcg,
@@ -5774,16 +5765,17 @@ __setup("cgroup.memory=", cgroup_memory);
/*
* subsys_initcall() for memory controller.
*
- * Some parts like hotcpu_notifier() have to be initialized from this context
- * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
- * everything that doesn't depend on a specific mem_cgroup structure should
- * be initialized from here.
+ * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
+ * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
+ * basically everything that doesn't depend on a specific mem_cgroup structure
+ * should be initialized from here.
*/
static int __init mem_cgroup_init(void)
{
int cpu, node;
- hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+ cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
+ memcg_hotplug_cpu_dead);
for_each_possible_cpu(cpu)
INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
diff --git a/mm/memory.c b/mm/memory.c
index e18c57bdc75c..33f45edf8272 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1637,8 +1637,8 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
- if (track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)))
- return -EINVAL;
+
+ track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot);
@@ -1655,8 +1655,8 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
- if (track_pfn_insert(vma, &pgprot, pfn))
- return -EINVAL;
+
+ track_pfn_insert(vma, &pgprot, pfn);
/*
* If we don't have pte special, then we have to use the pfn_valid()
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6de9440e3ae2..3dcc54da5637 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6399,8 +6399,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
}
if (pages && s)
- pr_info("Freeing %s memory: %ldK (%p - %p)\n",
- s, pages << (PAGE_SHIFT - 10), start, end);
+ pr_info("Freeing %s memory: %ldK\n",
+ s, pages << (PAGE_SHIFT - 10));
return pages;
}
@@ -6491,38 +6491,39 @@ void __init free_area_init(unsigned long *zones_size)
__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}
-static int page_alloc_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int page_alloc_cpu_dead(unsigned int cpu)
{
- int cpu = (unsigned long)hcpu;
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- lru_add_drain_cpu(cpu);
- drain_pages(cpu);
+ lru_add_drain_cpu(cpu);
+ drain_pages(cpu);
- /*
- * Spill the event counters of the dead processor
- * into the current processors event counters.
- * This artificially elevates the count of the current
- * processor.
- */
- vm_events_fold_cpu(cpu);
+ /*
+ * Spill the event counters of the dead processor
+ * into the current processors event counters.
+ * This artificially elevates the count of the current
+ * processor.
+ */
+ vm_events_fold_cpu(cpu);
- /*
- * Zero the differential counters of the dead processor
- * so that the vm statistics are consistent.
- *
- * This is only okay since the processor is dead and cannot
- * race with what we are doing.
- */
- cpu_vm_stats_fold(cpu);
- }
- return NOTIFY_OK;
+ /*
+ * Zero the differential counters of the dead processor
+ * so that the vm statistics are consistent.
+ *
+ * This is only okay since the processor is dead and cannot
+ * race with what we are doing.
+ */
+ cpu_vm_stats_fold(cpu);
+ return 0;
}
void __init page_alloc_init(void)
{
- hotcpu_notifier(page_alloc_cpu_notify, 0);
+ int ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
+ "mm/page_alloc:dead", NULL,
+ page_alloc_cpu_dead);
+ WARN_ON(ret < 0);
}
/*
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d75cdf360730..0c8f28a6d89f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3558,24 +3558,21 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
not required for correctness. So if the last cpu in a node goes
away, we get changed to run anywhere: as the first one comes back,
restore their cpu bindings. */
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int kswapd_cpu_online(unsigned int cpu)
{
int nid;
- if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
- for_each_node_state(nid, N_MEMORY) {
- pg_data_t *pgdat = NODE_DATA(nid);
- const struct cpumask *mask;
+ for_each_node_state(nid, N_MEMORY) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+ const struct cpumask *mask;
- mask = cpumask_of_node(pgdat->node_id);
+ mask = cpumask_of_node(pgdat->node_id);
- if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
- /* One of our CPUs online: restore mask */
- set_cpus_allowed_ptr(pgdat->kswapd, mask);
- }
+ if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
+ /* One of our CPUs online: restore mask */
+ set_cpus_allowed_ptr(pgdat->kswapd, mask);
}
- return NOTIFY_OK;
+ return 0;
}
/*
@@ -3617,12 +3614,15 @@ void kswapd_stop(int nid)
static int __init kswapd_init(void)
{
- int nid;
+ int nid, ret;
swap_setup();
for_each_node_state(nid, N_MEMORY)
kswapd_run(nid);
- hotcpu_notifier(cpu_callback, 0);
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "mm/vmscan:online", kswapd_cpu_online,
+ NULL);
+ WARN_ON(ret < 0);
return 0;
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 604f26a4f696..7c28df36f50f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1720,75 +1720,66 @@ static void __init start_shepherd_timer(void)
static void __init init_cpu_node_state(void)
{
- int cpu;
+ int node;
- get_online_cpus();
- for_each_online_cpu(cpu)
- node_set_state(cpu_to_node(cpu), N_CPU);
- put_online_cpus();
+ for_each_online_node(node) {
+ if (cpumask_weight(cpumask_of_node(node)) > 0)
+ node_set_state(node, N_CPU);
+ }
}
-static void vmstat_cpu_dead(int node)
+static int vmstat_cpu_online(unsigned int cpu)
{
- int cpu;
-
- get_online_cpus();
- for_each_online_cpu(cpu)
- if (cpu_to_node(cpu) == node)
- goto end;
+ refresh_zone_stat_thresholds();
+ node_set_state(cpu_to_node(cpu), N_CPU);
+ return 0;
+}
- node_clear_state(node, N_CPU);
-end:
- put_online_cpus();
+static int vmstat_cpu_down_prep(unsigned int cpu)
+{
+ cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+ return 0;
}
-/*
- * Use the cpu notifier to insure that the thresholds are recalculated
- * when necessary.
- */
-static int vmstat_cpuup_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- long cpu = (long)hcpu;
-
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- refresh_zone_stat_thresholds();
- node_set_state(cpu_to_node(cpu), N_CPU);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
- break;
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- refresh_zone_stat_thresholds();
- vmstat_cpu_dead(cpu_to_node(cpu));
- break;
- default:
- break;
- }
- return NOTIFY_OK;
+static int vmstat_cpu_dead(unsigned int cpu)
+{
+ const struct cpumask *node_cpus;
+ int node;
+
+ node = cpu_to_node(cpu);
+
+ refresh_zone_stat_thresholds();
+ node_cpus = cpumask_of_node(node);
+ if (cpumask_weight(node_cpus) > 0)
+ return 0;
+
+ node_clear_state(node, N_CPU);
+ return 0;
}
-static struct notifier_block vmstat_notifier =
- { &vmstat_cpuup_callback, NULL, 0 };
#endif
static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
- cpu_notifier_register_begin();
- __register_cpu_notifier(&vmstat_notifier);
+ int ret;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
+ NULL, vmstat_cpu_dead);
+ if (ret < 0)
+ pr_err("vmstat: failed to register 'dead' hotplug state\n");
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
+ vmstat_cpu_online,
+ vmstat_cpu_down_prep);
+ if (ret < 0)
+ pr_err("vmstat: failed to register 'online' hotplug state\n");
+
+ get_online_cpus();
init_cpu_node_state();
+ put_online_cpus();
start_shepherd_timer();
- cpu_notifier_register_done();
#endif
#ifdef CONFIG_PROC_FS
proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b0bc023d25c5..9cc3c0b2c2c1 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1284,61 +1284,21 @@ out:
#endif /* CONFIG_PGTABLE_MAPPING */
-static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
- void *pcpu)
+static int zs_cpu_prepare(unsigned int cpu)
{
- int ret, cpu = (long)pcpu;
struct mapping_area *area;
- switch (action) {
- case CPU_UP_PREPARE:
- area = &per_cpu(zs_map_area, cpu);
- ret = __zs_cpu_up(area);
- if (ret)
- return notifier_from_errno(ret);
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- area = &per_cpu(zs_map_area, cpu);
- __zs_cpu_down(area);
- break;
- }
-
- return NOTIFY_OK;
+ area = &per_cpu(zs_map_area, cpu);
+ return __zs_cpu_up(area);
}
-static struct notifier_block zs_cpu_nb = {
- .notifier_call = zs_cpu_notifier
-};
-
-static int zs_register_cpu_notifier(void)
+static int zs_cpu_dead(unsigned int cpu)
{
- int cpu, uninitialized_var(ret);
-
- cpu_notifier_register_begin();
-
- __register_cpu_notifier(&zs_cpu_nb);
- for_each_online_cpu(cpu) {
- ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
- if (notifier_to_errno(ret))
- break;
- }
-
- cpu_notifier_register_done();
- return notifier_to_errno(ret);
-}
-
-static void zs_unregister_cpu_notifier(void)
-{
- int cpu;
-
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu)
- zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
- __unregister_cpu_notifier(&zs_cpu_nb);
+ struct mapping_area *area;
- cpu_notifier_register_done();
+ area = &per_cpu(zs_map_area, cpu);
+ __zs_cpu_down(area);
+ return 0;
}
static void __init init_zs_size_classes(void)
@@ -2534,10 +2494,10 @@ static int __init zs_init(void)
if (ret)
goto out;
- ret = zs_register_cpu_notifier();
-
+ ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
+ zs_cpu_prepare, zs_cpu_dead);
if (ret)
- goto notifier_fail;
+ goto hp_setup_fail;
init_zs_size_classes();
@@ -2549,8 +2509,7 @@ static int __init zs_init(void)
return 0;
-notifier_fail:
- zs_unregister_cpu_notifier();
+hp_setup_fail:
zsmalloc_unmount();
out:
return ret;
@@ -2562,7 +2521,7 @@ static void __exit zs_exit(void)
zpool_unregister_driver(&zs_zpool_driver);
#endif
zsmalloc_unmount();
- zs_unregister_cpu_notifier();
+ cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
zs_stat_exit();
}
diff --git a/mm/zswap.c b/mm/zswap.c
index 275b22cc8df4..067a0d62f318 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -118,7 +118,7 @@ struct zswap_pool {
struct kref kref;
struct list_head list;
struct work_struct work;
- struct notifier_block notifier;
+ struct hlist_node node;
char tfm_name[CRYPTO_MAX_ALG_NAME];
};
@@ -352,143 +352,58 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
-static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
+static int zswap_dstmem_prepare(unsigned int cpu)
{
u8 *dst;
- switch (action) {
- case CPU_UP_PREPARE:
- dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
- if (!dst) {
- pr_err("can't allocate compressor buffer\n");
- return NOTIFY_BAD;
- }
- per_cpu(zswap_dstmem, cpu) = dst;
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- dst = per_cpu(zswap_dstmem, cpu);
- kfree(dst);
- per_cpu(zswap_dstmem, cpu) = NULL;
- break;
- default:
- break;
+ dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+ if (!dst) {
+ pr_err("can't allocate compressor buffer\n");
+ return -ENOMEM;
}
- return NOTIFY_OK;
+ per_cpu(zswap_dstmem, cpu) = dst;
+ return 0;
}
-static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
+static int zswap_dstmem_dead(unsigned int cpu)
{
- return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
-}
+ u8 *dst;
-static struct notifier_block zswap_dstmem_notifier = {
- .notifier_call = zswap_cpu_dstmem_notifier,
-};
+ dst = per_cpu(zswap_dstmem, cpu);
+ kfree(dst);
+ per_cpu(zswap_dstmem, cpu) = NULL;
-static int __init zswap_cpu_dstmem_init(void)
-{
- unsigned long cpu;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
- NOTIFY_BAD)
- goto cleanup;
- __register_cpu_notifier(&zswap_dstmem_notifier);
- cpu_notifier_register_done();
return 0;
-
-cleanup:
- for_each_online_cpu(cpu)
- __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
- cpu_notifier_register_done();
- return -ENOMEM;
-}
-
-static void zswap_cpu_dstmem_destroy(void)
-{
- unsigned long cpu;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
- __unregister_cpu_notifier(&zswap_dstmem_notifier);
- cpu_notifier_register_done();
}
-static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
- unsigned long action, unsigned long cpu)
+static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
{
+ struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
struct crypto_comp *tfm;
- switch (action) {
- case CPU_UP_PREPARE:
- if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
- break;
- tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
- if (IS_ERR_OR_NULL(tfm)) {
- pr_err("could not alloc crypto comp %s : %ld\n",
- pool->tfm_name, PTR_ERR(tfm));
- return NOTIFY_BAD;
- }
- *per_cpu_ptr(pool->tfm, cpu) = tfm;
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- tfm = *per_cpu_ptr(pool->tfm, cpu);
- if (!IS_ERR_OR_NULL(tfm))
- crypto_free_comp(tfm);
- *per_cpu_ptr(pool->tfm, cpu) = NULL;
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static int zswap_cpu_comp_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
-{
- unsigned long cpu = (unsigned long)pcpu;
- struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);
-
- return __zswap_cpu_comp_notifier(pool, action, cpu);
-}
+ if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
+ return 0;
-static int zswap_cpu_comp_init(struct zswap_pool *pool)
-{
- unsigned long cpu;
-
- memset(&pool->notifier, 0, sizeof(pool->notifier));
- pool->notifier.notifier_call = zswap_cpu_comp_notifier;
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
- NOTIFY_BAD)
- goto cleanup;
- __register_cpu_notifier(&pool->notifier);
- cpu_notifier_register_done();
+ tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
+ if (IS_ERR_OR_NULL(tfm)) {
+ pr_err("could not alloc crypto comp %s : %ld\n",
+ pool->tfm_name, PTR_ERR(tfm));
+ return -ENOMEM;
+ }
+ *per_cpu_ptr(pool->tfm, cpu) = tfm;
return 0;
-
-cleanup:
- for_each_online_cpu(cpu)
- __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
- cpu_notifier_register_done();
- return -ENOMEM;
}
-static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
+static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
{
- unsigned long cpu;
+ struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+ struct crypto_comp *tfm;
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
- __unregister_cpu_notifier(&pool->notifier);
- cpu_notifier_register_done();
+ tfm = *per_cpu_ptr(pool->tfm, cpu);
+ if (!IS_ERR_OR_NULL(tfm))
+ crypto_free_comp(tfm);
+ *per_cpu_ptr(pool->tfm, cpu) = NULL;
+ return 0;
}
/*********************************
@@ -569,6 +484,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
struct zswap_pool *pool;
char name[38]; /* 'zswap' + 32 char (max) num + \0 */
gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+ int ret;
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool) {
@@ -593,7 +509,9 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
goto error;
}
- if (zswap_cpu_comp_init(pool))
+ ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
+ &pool->node);
+ if (ret)
goto error;
pr_debug("using %s compressor\n", pool->tfm_name);
@@ -647,7 +565,7 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
{
zswap_pool_debug("destroying", pool);
- zswap_cpu_comp_destroy(pool);
+ cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
free_percpu(pool->tfm);
zpool_destroy_pool(pool->zpool);
kfree(pool);
@@ -1238,6 +1156,7 @@ static void __exit zswap_debugfs_exit(void) { }
static int __init init_zswap(void)
{
struct zswap_pool *pool;
+ int ret;
zswap_init_started = true;
@@ -1246,11 +1165,20 @@ static int __init init_zswap(void)
goto cache_fail;
}
- if (zswap_cpu_dstmem_init()) {
+ ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
+ zswap_dstmem_prepare, zswap_dstmem_dead);
+ if (ret) {
pr_err("dstmem alloc failed\n");
goto dstmem_fail;
}
+ ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
+ "mm/zswap_pool:prepare",
+ zswap_cpu_comp_prepare,
+ zswap_cpu_comp_dead);
+ if (ret)
+ goto hp_fail;
+
pool = __zswap_pool_create_fallback();
if (!pool) {
pr_err("pool creation failed\n");
@@ -1267,7 +1195,9 @@ static int __init init_zswap(void)
return 0;
pool_fail:
- zswap_cpu_dstmem_destroy();
+ cpuhp_remove_state_nocalls(CPUHP_MM_ZSWP_POOL_PREPARE);
+hp_fail:
+ cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
zswap_entry_cache_destroy();
cache_fail:
diff --git a/net/core/dev.c b/net/core/dev.c
index 1d33ce03365f..6372117f653f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5064,7 +5064,7 @@ count:
return rc;
goto restart;
}
- cpu_relax_lowlatency();
+ cpu_relax();
}
if (napi_poll)
busy_poll_stop(napi, have_poll_lock);
@@ -8008,18 +8008,13 @@ out:
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
-static int dev_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *ocpu)
+static int dev_cpu_dead(unsigned int oldcpu)
{
struct sk_buff **list_skb;
struct sk_buff *skb;
- unsigned int cpu, oldcpu = (unsigned long)ocpu;
+ unsigned int cpu;
struct softnet_data *sd, *oldsd;
- if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
- return NOTIFY_OK;
-
local_irq_disable();
cpu = smp_processor_id();
sd = &per_cpu(softnet_data, cpu);
@@ -8069,10 +8064,9 @@ static int dev_cpu_callback(struct notifier_block *nfb,
input_queue_head_incr(oldsd);
}
- return NOTIFY_OK;
+ return 0;
}
-
/**
* netdev_increment_features - increment feature set by one
* @all: current feature set
@@ -8406,7 +8400,9 @@ static int __init net_dev_init(void)
open_softirq(NET_TX_SOFTIRQ, net_tx_action);
open_softirq(NET_RX_SOFTIRQ, net_rx_action);
- hotcpu_notifier(dev_cpu_callback, 0);
+ rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
+ NULL, dev_cpu_dead);
+ WARN_ON(rc < 0);
dst_subsys_init();
rc = 0;
out:
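
The net/core/dev.c conversion is the simplest shape of the new API: a teardown-only state. Nothing has to be prepared when a CPU comes up, so the startup callback is NULL and the _nocalls variant skips invoking anything for CPUs that are already online at registration time. A minimal sketch along the same lines; my_cpu_dead and my_subsys_init are placeholders, the cpuhp call and state name mirror the hunk above.

#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int my_cpu_dead(unsigned int oldcpu)
{
	/*
	 * Runs on a surviving CPU after @oldcpu is gone; migrate or drop
	 * whatever per-CPU work was still queued on @oldcpu.
	 */
	return 0;
}

static int __init my_subsys_init(void)
{
	int rc;

	/* NULL startup: nothing to do when a CPU comes up */
	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, my_cpu_dead);
	WARN_ON(rc < 0);
	return 0;
}
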
diff --git a/net/core/flow.c b/net/core/flow.c
index 18e8893d4be5..f765c11d8df5 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -417,28 +417,20 @@ static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
return 0;
}
-static int flow_cache_cpu(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
{
- struct flow_cache *fc = container_of(nfb, struct flow_cache,
- hotcpu_notifier);
- int res, cpu = (unsigned long) hcpu;
+ struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
+
+ return flow_cache_cpu_prepare(fc, cpu);
+}
+
+static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- res = flow_cache_cpu_prepare(fc, cpu);
- if (res)
- return notifier_from_errno(res);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- __flow_cache_shrink(fc, fcp, 0);
- break;
- }
- return NOTIFY_OK;
+ __flow_cache_shrink(fc, fcp, 0);
+ return 0;
}
int flow_cache_init(struct net *net)
@@ -465,18 +457,8 @@ int flow_cache_init(struct net *net)
if (!fc->percpu)
return -ENOMEM;
- cpu_notifier_register_begin();
-
- for_each_online_cpu(i) {
- if (flow_cache_cpu_prepare(fc, i))
- goto err;
- }
- fc->hotcpu_notifier = (struct notifier_block){
- .notifier_call = flow_cache_cpu,
- };
- __register_hotcpu_notifier(&fc->hotcpu_notifier);
-
- cpu_notifier_register_done();
+ if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
+ goto err;
setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
(unsigned long) fc);
@@ -492,8 +474,6 @@ err:
fcp->hash_table = NULL;
}
- cpu_notifier_register_done();
-
free_percpu(fc->percpu);
fc->percpu = NULL;
@@ -507,7 +487,8 @@ void flow_cache_fini(struct net *net)
struct flow_cache *fc = &net->xfrm.flow_cache_global;
del_timer_sync(&fc->rnd_timer);
- unregister_hotcpu_notifier(&fc->hotcpu_notifier);
+
+ cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);
for_each_possible_cpu(i) {
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
@@ -519,3 +500,14 @@ void flow_cache_fini(struct net *net)
fc->percpu = NULL;
}
EXPORT_SYMBOL(flow_cache_fini);
+
+void __init flow_cache_hp_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
+ "net/flow:prepare",
+ flow_cache_cpu_up_prep,
+ flow_cache_cpu_dead);
+ WARN_ON(ret < 0);
+}
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 88a2a3ba4212..8f7ef167c45a 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -639,7 +639,7 @@ static void iucv_disable(void)
put_online_cpus();
}
-static void free_iucv_data(int cpu)
+static int iucv_cpu_dead(unsigned int cpu)
{
kfree(iucv_param_irq[cpu]);
iucv_param_irq[cpu] = NULL;
@@ -647,9 +647,10 @@ static void free_iucv_data(int cpu)
iucv_param[cpu] = NULL;
kfree(iucv_irq_data[cpu]);
iucv_irq_data[cpu] = NULL;
+ return 0;
}
-static int alloc_iucv_data(int cpu)
+static int iucv_cpu_prepare(unsigned int cpu)
{
/* Note: GFP_DMA used to get memory below 2G */
iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
@@ -671,58 +672,38 @@ static int alloc_iucv_data(int cpu)
return 0;
out_free:
- free_iucv_data(cpu);
+ iucv_cpu_dead(cpu);
return -ENOMEM;
}
-static int iucv_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int iucv_cpu_online(unsigned int cpu)
{
- cpumask_t cpumask;
- long cpu = (long) hcpu;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- if (alloc_iucv_data(cpu))
- return notifier_from_errno(-ENOMEM);
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- free_iucv_data(cpu);
- break;
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- if (!iucv_path_table)
- break;
- smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- if (!iucv_path_table)
- break;
- cpumask_copy(&cpumask, &iucv_buffer_cpumask);
- cpumask_clear_cpu(cpu, &cpumask);
- if (cpumask_empty(&cpumask))
- /* Can't offline last IUCV enabled cpu. */
- return notifier_from_errno(-EINVAL);
- smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
- if (cpumask_empty(&iucv_irq_cpumask))
- smp_call_function_single(
- cpumask_first(&iucv_buffer_cpumask),
- iucv_allow_cpu, NULL, 1);
- break;
- }
- return NOTIFY_OK;
+ if (!iucv_path_table)
+ return 0;
+ iucv_declare_cpu(NULL);
+ return 0;
}
-static struct notifier_block __refdata iucv_cpu_notifier = {
- .notifier_call = iucv_cpu_notify,
-};
+static int iucv_cpu_down_prep(unsigned int cpu)
+{
+ cpumask_t cpumask;
+
+ if (!iucv_path_table)
+ return 0;
+
+ cpumask_copy(&cpumask, &iucv_buffer_cpumask);
+ cpumask_clear_cpu(cpu, &cpumask);
+ if (cpumask_empty(&cpumask))
+ /* Can't offline last IUCV enabled cpu. */
+ return -EINVAL;
+
+ iucv_retrieve_cpu(NULL);
+ if (!cpumask_empty(&iucv_irq_cpumask))
+ return 0;
+ smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
+ iucv_allow_cpu, NULL, 1);
+ return 0;
+}
/**
* iucv_sever_pathid
@@ -2027,6 +2008,7 @@ struct iucv_interface iucv_if = {
};
EXPORT_SYMBOL(iucv_if);
+static enum cpuhp_state iucv_online;
/**
* iucv_init
*
@@ -2035,7 +2017,6 @@ EXPORT_SYMBOL(iucv_if);
static int __init iucv_init(void)
{
int rc;
- int cpu;
if (!MACHINE_IS_VM) {
rc = -EPROTONOSUPPORT;
@@ -2054,23 +2035,19 @@ static int __init iucv_init(void)
goto out_int;
}
- cpu_notifier_register_begin();
-
- for_each_online_cpu(cpu) {
- if (alloc_iucv_data(cpu)) {
- rc = -ENOMEM;
- goto out_free;
- }
- }
- rc = __register_hotcpu_notifier(&iucv_cpu_notifier);
+ rc = cpuhp_setup_state(CPUHP_NET_IUCV_PREPARE, "net/iucv:prepare",
+ iucv_cpu_prepare, iucv_cpu_dead);
if (rc)
- goto out_free;
-
- cpu_notifier_register_done();
+ goto out_dev;
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/iucv:online",
+ iucv_cpu_online, iucv_cpu_down_prep);
+ if (rc < 0)
+ goto out_prep;
+ iucv_online = rc;
rc = register_reboot_notifier(&iucv_reboot_notifier);
if (rc)
- goto out_cpu;
+ goto out_remove_hp;
ASCEBC(iucv_error_no_listener, 16);
ASCEBC(iucv_error_no_memory, 16);
ASCEBC(iucv_error_pathid, 16);
@@ -2084,15 +2061,11 @@ static int __init iucv_init(void)
out_reboot:
unregister_reboot_notifier(&iucv_reboot_notifier);
-out_cpu:
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&iucv_cpu_notifier);
-out_free:
- for_each_possible_cpu(cpu)
- free_iucv_data(cpu);
-
- cpu_notifier_register_done();
-
+out_remove_hp:
+ cpuhp_remove_state(iucv_online);
+out_prep:
+ cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
+out_dev:
root_device_unregister(iucv_root);
out_int:
unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
@@ -2110,7 +2083,6 @@ out:
static void __exit iucv_exit(void)
{
struct iucv_irq_list *p, *n;
- int cpu;
spin_lock_irq(&iucv_queue_lock);
list_for_each_entry_safe(p, n, &iucv_task_queue, list)
@@ -2119,11 +2091,9 @@ static void __exit iucv_exit(void)
kfree(p);
spin_unlock_irq(&iucv_queue_lock);
unregister_reboot_notifier(&iucv_reboot_notifier);
- cpu_notifier_register_begin();
- __unregister_hotcpu_notifier(&iucv_cpu_notifier);
- for_each_possible_cpu(cpu)
- free_iucv_data(cpu);
- cpu_notifier_register_done();
+
+ cpuhp_remove_state_nocalls(iucv_online);
+ cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
root_device_unregister(iucv_root);
bus_unregister(&iucv_bus);
unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
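
The iucv conversion uses two states: a fixed PREPARE state for the per-CPU buffers, plus a dynamically allocated online state. CPUHP_AP_ONLINE_DYN states have no fixed id, so the value returned by cpuhp_setup_state() must be saved for later removal, which is what the new iucv_online variable does above. A reduced sketch of the dynamic part, with hypothetical callbacks:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state my_online_state;

static int my_cpu_online(unsigned int cpu)
{
	/* runs on @cpu once it is fully online */
	return 0;
}

static int my_cpu_down_prep(unsigned int cpu)
{
	/* runs on @cpu before it goes down; a non-zero return vetoes the offline */
	return 0;
}

static int __init my_init(void)
{
	int rc;

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/iucv:online",
			       my_cpu_online, my_cpu_down_prep);
	if (rc < 0)
		return rc;
	/* for dynamic states the allocated state id is returned on success */
	my_online_state = rc;
	return 0;
}

static void __exit my_exit(void)
{
	cpuhp_remove_state(my_online_state);
}

The ability of the down_prep callback to return an error is what lets the converted code keep the old "can't offline the last IUCV-enabled CPU" behaviour without a notifier.
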
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 5bf7e1bfeac7..177e208e8ff5 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3113,6 +3113,7 @@ static struct pernet_operations __net_initdata xfrm_net_ops = {
void __init xfrm_init(void)
{
+ flow_cache_hp_init();
register_pernet_subsys(&xfrm_net_ops);
seqcount_init(&xfrm_policy_hash_generation);
xfrm_input_init();
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index d0c729ccec20..40882877b513 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -6076,6 +6076,12 @@ sub process {
}
}
+# check for mutex_trylock_recursive usage
+ if ($line =~ /mutex_trylock_recursive/) {
+ ERROR("LOCKING",
+ "recursive locking is bad, do not use this ever.\n" . $herecurr);
+ }
+
# check for lockdep_set_novalidate_class
if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
$line =~ /__lockdep_no_validate__\s*\)/ ) {
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index c332684e1b5a..5206d99ddeb8 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -139,7 +139,8 @@ handle_line() {
while read line; do
# Let's see if we have an address in the line
- if [[ $line =~ \[\<([^]]+)\>\] ]]; then
+ if [[ $line =~ \[\<([^]]+)\>\] ]] ||
+ [[ $line =~ [^+\ ]+\+0x[0-9a-f]+/0x[0-9a-f]+ ]]; then
# Translate address to line numbers
handle_line "$line"
# Is it a code line?
diff --git a/scripts/faddr2line b/scripts/faddr2line
index 450b33257339..29df825d375c 100755
--- a/scripts/faddr2line
+++ b/scripts/faddr2line
@@ -105,9 +105,18 @@ __faddr2line() {
# In rare cases there might be duplicates.
while read symbol; do
local fields=($symbol)
- local sym_base=0x${fields[1]}
- local sym_size=${fields[2]}
- local sym_type=${fields[3]}
+ local sym_base=0x${fields[0]}
+ local sym_type=${fields[1]}
+ local sym_end=0x${fields[3]}
+
+ # calculate the size
+ local sym_size=$(($sym_end - $sym_base))
+ if [[ -z $sym_size ]] || [[ $sym_size -le 0 ]]; then
+ warn "bad symbol size: base: $sym_base end: $sym_end"
+ DONE=1
+ return
+ fi
+ sym_size=0x$(printf %x $sym_size)
# calculate the address
local addr=$(($sym_base + $offset))
@@ -116,26 +125,26 @@ __faddr2line() {
DONE=1
return
fi
- local hexaddr=0x$(printf %x $addr)
+ addr=0x$(printf %x $addr)
# weed out non-function symbols
- if [[ $sym_type != "FUNC" ]]; then
+ if [[ $sym_type != t ]] && [[ $sym_type != T ]]; then
[[ $print_warnings = 1 ]] &&
- echo "skipping $func address at $hexaddr due to non-function symbol"
+ echo "skipping $func address at $addr due to non-function symbol of type '$sym_type'"
continue
fi
# if the user provided a size, make sure it matches the symbol's size
if [[ -n $size ]] && [[ $size -ne $sym_size ]]; then
[[ $print_warnings = 1 ]] &&
- echo "skipping $func address at $hexaddr due to size mismatch ($size != $sym_size)"
+ echo "skipping $func address at $addr due to size mismatch ($size != $sym_size)"
continue;
fi
# make sure the provided offset is within the symbol's range
if [[ $offset -gt $sym_size ]]; then
[[ $print_warnings = 1 ]] &&
- echo "skipping $func address at $hexaddr due to size mismatch ($offset > $sym_size)"
+ echo "skipping $func address at $addr due to size mismatch ($offset > $sym_size)"
continue
fi
@@ -143,12 +152,12 @@ __faddr2line() {
[[ $FIRST = 0 ]] && echo
FIRST=0
- local hexsize=0x$(printf %x $sym_size)
- echo "$func+$offset/$hexsize:"
- addr2line -fpie $objfile $hexaddr | sed "s; $dir_prefix\(\./\)*; ;"
+ # pass real address to addr2line
+ echo "$func+$offset/$sym_size:"
+ addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
DONE=1
- done < <(readelf -sW $objfile | awk -v f=$func '$8 == f {print}')
+ done < <(nm -n $objfile | awk -v fn=$func '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, $1 }')
}
[[ $# -lt 2 ]] && usage
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 973b6f733368..a73f762c48d6 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -85,6 +85,7 @@ struct symbol {
struct property *prop;
struct expr_value dir_dep;
struct expr_value rev_dep;
+ struct expr_value implied;
};
#define for_all_symbols(i, sym) for (i = 0; i < SYMBOL_HASHSIZE; i++) for (sym = symbol_hash[i]; sym; sym = sym->next) if (sym->type != S_OTHER)
@@ -136,6 +137,7 @@ enum prop_type {
P_DEFAULT, /* default y */
P_CHOICE, /* choice value */
P_SELECT, /* select BAR */
+ P_IMPLY, /* imply BAR */
P_RANGE, /* range 7..100 (for a symbol) */
P_ENV, /* value from environment variable */
P_SYMBOL, /* where a symbol is defined */
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index aed678e8a777..e9357931b47d 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -233,6 +233,8 @@ static void sym_check_prop(struct symbol *sym)
{
struct property *prop;
struct symbol *sym2;
+ char *use;
+
for (prop = sym->prop; prop; prop = prop->next) {
switch (prop->type) {
case P_DEFAULT:
@@ -252,18 +254,20 @@ static void sym_check_prop(struct symbol *sym)
}
break;
case P_SELECT:
+ case P_IMPLY:
+ use = prop->type == P_SELECT ? "select" : "imply";
sym2 = prop_get_symbol(prop);
if (sym->type != S_BOOLEAN && sym->type != S_TRISTATE)
prop_warn(prop,
- "config symbol '%s' uses select, but is "
- "not boolean or tristate", sym->name);
+ "config symbol '%s' uses %s, but is "
+ "not boolean or tristate", sym->name, use);
else if (sym2->type != S_UNKNOWN &&
sym2->type != S_BOOLEAN &&
sym2->type != S_TRISTATE)
prop_warn(prop,
- "'%s' has wrong type. 'select' only "
+ "'%s' has wrong type. '%s' only "
"accept arguments of boolean and "
- "tristate type", sym2->name);
+ "tristate type", sym2->name, use);
break;
case P_RANGE:
if (sym->type != S_INT && sym->type != S_HEX)
@@ -333,6 +337,10 @@ void menu_finalize(struct menu *parent)
struct symbol *es = prop_get_symbol(prop);
es->rev_dep.expr = expr_alloc_or(es->rev_dep.expr,
expr_alloc_and(expr_alloc_symbol(menu->sym), expr_copy(dep)));
+ } else if (prop->type == P_IMPLY) {
+ struct symbol *es = prop_get_symbol(prop);
+ es->implied.expr = expr_alloc_or(es->implied.expr,
+ expr_alloc_and(expr_alloc_symbol(menu->sym), expr_copy(dep)));
}
}
}
@@ -612,13 +620,30 @@ static struct property *get_symbol_prop(struct symbol *sym)
return prop;
}
+static void get_symbol_props_str(struct gstr *r, struct symbol *sym,
+ enum prop_type tok, const char *prefix)
+{
+ bool hit = false;
+ struct property *prop;
+
+ for_all_properties(sym, prop, tok) {
+ if (!hit) {
+ str_append(r, prefix);
+ hit = true;
+ } else
+ str_printf(r, " && ");
+ expr_gstr_print(prop->expr, r);
+ }
+ if (hit)
+ str_append(r, "\n");
+}
+
/*
* head is optional and may be NULL
*/
static void get_symbol_str(struct gstr *r, struct symbol *sym,
struct list_head *head)
{
- bool hit;
struct property *prop;
if (sym && sym->name) {
@@ -648,22 +673,20 @@ static void get_symbol_str(struct gstr *r, struct symbol *sym,
}
}
- hit = false;
- for_all_properties(sym, prop, P_SELECT) {
- if (!hit) {
- str_append(r, " Selects: ");
- hit = true;
- } else
- str_printf(r, " && ");
- expr_gstr_print(prop->expr, r);
- }
- if (hit)
- str_append(r, "\n");
+ get_symbol_props_str(r, sym, P_SELECT, _(" Selects: "));
if (sym->rev_dep.expr) {
str_append(r, _(" Selected by: "));
expr_gstr_print(sym->rev_dep.expr, r);
str_append(r, "\n");
}
+
+ get_symbol_props_str(r, sym, P_IMPLY, _(" Implies: "));
+ if (sym->implied.expr) {
+ str_append(r, _(" Implied by: "));
+ expr_gstr_print(sym->implied.expr, r);
+ str_append(r, "\n");
+ }
+
str_append(r, "\n\n");
}
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 2432298487fb..20136ffefb23 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -258,6 +258,15 @@ static void sym_calc_visibility(struct symbol *sym)
sym->rev_dep.tri = tri;
sym_set_changed(sym);
}
+ tri = no;
+ if (sym->implied.expr && sym->dir_dep.tri != no)
+ tri = expr_calc_value(sym->implied.expr);
+ if (tri == mod && sym_get_type(sym) == S_BOOLEAN)
+ tri = yes;
+ if (sym->implied.tri != tri) {
+ sym->implied.tri = tri;
+ sym_set_changed(sym);
+ }
}
/*
@@ -397,6 +406,10 @@ void sym_calc_value(struct symbol *sym)
newval.tri = EXPR_AND(expr_calc_value(prop->expr),
prop->visible.tri);
}
+ if (sym->implied.tri != no) {
+ sym->flags |= SYMBOL_WRITE;
+ newval.tri = EXPR_OR(newval.tri, sym->implied.tri);
+ }
}
calc_newval:
if (sym->dir_dep.tri == no && sym->rev_dep.tri != no) {
@@ -413,7 +426,8 @@ void sym_calc_value(struct symbol *sym)
}
newval.tri = EXPR_OR(newval.tri, sym->rev_dep.tri);
}
- if (newval.tri == mod && sym_get_type(sym) == S_BOOLEAN)
+ if (newval.tri == mod &&
+ (sym_get_type(sym) == S_BOOLEAN || sym->implied.tri == yes))
newval.tri = yes;
break;
case S_STRING:
@@ -498,6 +512,8 @@ bool sym_tristate_within_range(struct symbol *sym, tristate val)
return false;
if (sym->visible <= sym->rev_dep.tri)
return false;
+ if (sym->implied.tri == yes && val == mod)
+ return false;
if (sym_is_choice_value(sym) && sym->visible == yes)
return val == yes;
return val >= sym->rev_dep.tri && val <= sym->visible;
@@ -750,6 +766,10 @@ const char *sym_get_string_default(struct symbol *sym)
if (sym->type == S_BOOLEAN && val == mod)
val = yes;
+ /* adjust the default value if this symbol is implied by another */
+ if (val < sym->implied.tri)
+ val = sym->implied.tri;
+
switch (sym->type) {
case S_BOOLEAN:
case S_TRISTATE:
@@ -1352,6 +1372,8 @@ const char *prop_get_type_name(enum prop_type type)
return "choice";
case P_SELECT:
return "select";
+ case P_IMPLY:
+ return "imply";
case P_RANGE:
return "range";
case P_SYMBOL:
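
For readers unfamiliar with the new keyword: `imply` behaves like a weak `select`. The implied value is folded into the computed value with kconfig's tristate OR (a maximum over no < mod < yes), so it can raise a symbol's default, and an implying symbol that is =y promotes a result of m to y; unlike `select`, though, it never overrides the symbol's own dependencies and the user can still turn the symbol off. A toy model of just that arithmetic, ignoring dependencies and visibility (standalone C, no kconfig internals):

#include <stdio.h>

enum tristate { no = 0, mod = 1, yes = 2 };

/* kconfig's EXPR_OR() over tristates is simply the maximum of the two values */
static enum tristate tri_or(enum tristate a, enum tristate b)
{
	return a > b ? a : b;
}

/* rough model of the implied-value handling added to sym_calc_value() above */
static enum tristate calc(enum tristate from_defaults, enum tristate implied)
{
	enum tristate val = tri_or(from_defaults, implied);

	/* an implying symbol that is =y promotes a result of m to y */
	if (val == mod && implied == yes)
		val = yes;
	return val;
}

int main(void)
{
	printf("defaults=n, implied=m -> %d\n", calc(no, mod));  /* 1 (m) */
	printf("defaults=n, implied=y -> %d\n", calc(no, yes));  /* 2 (y) */
	printf("defaults=m, implied=n -> %d\n", calc(mod, no));  /* 1 (m) */
	return 0;
}

The sym_tristate_within_range() hunk above adds the matching restriction on user input: when the implied value is y, the symbol may still be set to n, but not to m.
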
diff --git a/scripts/kconfig/zconf.gperf b/scripts/kconfig/zconf.gperf
index ac498f01b449..ead02edec936 100644
--- a/scripts/kconfig/zconf.gperf
+++ b/scripts/kconfig/zconf.gperf
@@ -38,6 +38,7 @@ int, T_TYPE, TF_COMMAND, S_INT
hex, T_TYPE, TF_COMMAND, S_HEX
string, T_TYPE, TF_COMMAND, S_STRING
select, T_SELECT, TF_COMMAND
+imply, T_IMPLY, TF_COMMAND
range, T_RANGE, TF_COMMAND
visible, T_VISIBLE, TF_COMMAND
option, T_OPTION, TF_COMMAND
diff --git a/scripts/kconfig/zconf.hash.c_shipped b/scripts/kconfig/zconf.hash.c_shipped
index 360a62df2b5e..d51b15de074a 100644
--- a/scripts/kconfig/zconf.hash.c_shipped
+++ b/scripts/kconfig/zconf.hash.c_shipped
@@ -55,10 +55,10 @@ kconf_id_hash (register const char *str, register unsigned int len)
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
- 73, 73, 73, 73, 73, 73, 73, 5, 25, 25,
+ 73, 73, 73, 73, 73, 73, 73, 10, 25, 25,
0, 0, 0, 5, 0, 0, 73, 73, 5, 0,
10, 5, 45, 73, 20, 20, 0, 15, 15, 73,
- 20, 5, 73, 73, 73, 73, 73, 73, 73, 73,
+ 20, 0, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
73, 73, 73, 73, 73, 73, 73, 73, 73, 73,
@@ -120,6 +120,7 @@ struct kconf_id_strings_t
char kconf_id_strings_str43[sizeof("hex")];
char kconf_id_strings_str46[sizeof("config")];
char kconf_id_strings_str47[sizeof("boolean")];
+ char kconf_id_strings_str50[sizeof("imply")];
char kconf_id_strings_str51[sizeof("string")];
char kconf_id_strings_str54[sizeof("help")];
char kconf_id_strings_str56[sizeof("prompt")];
@@ -157,6 +158,7 @@ static const struct kconf_id_strings_t kconf_id_strings_contents =
"hex",
"config",
"boolean",
+ "imply",
"string",
"help",
"prompt",
@@ -174,7 +176,7 @@ kconf_id_lookup (register const char *str, register unsigned int len)
{
enum
{
- TOTAL_KEYWORDS = 34,
+ TOTAL_KEYWORDS = 35,
MIN_WORD_LENGTH = 2,
MAX_WORD_LENGTH = 14,
MIN_HASH_VALUE = 2,
@@ -205,15 +207,15 @@ kconf_id_lookup (register const char *str, register unsigned int len)
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12, T_DEFAULT, TF_COMMAND, S_TRISTATE},
#line 36 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
-#line 46 "scripts/kconfig/zconf.gperf"
+#line 47 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14, T_OPT_DEFCONFIG_LIST,TF_OPTION},
{-1}, {-1},
-#line 44 "scripts/kconfig/zconf.gperf"
+#line 45 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17, T_ON, TF_PARAM},
#line 29 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_OPTIONAL, TF_COMMAND},
{-1}, {-1},
-#line 43 "scripts/kconfig/zconf.gperf"
+#line 44 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21, T_OPTION, TF_COMMAND},
#line 17 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22, T_ENDMENU, TF_COMMAND},
@@ -223,9 +225,9 @@ kconf_id_lookup (register const char *str, register unsigned int len)
#line 23 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str25, T_MENUCONFIG, TF_COMMAND},
{-1},
-#line 45 "scripts/kconfig/zconf.gperf"
+#line 46 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27, T_OPT_MODULES, TF_OPTION},
-#line 48 "scripts/kconfig/zconf.gperf"
+#line 49 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28, T_OPT_ALLNOCONFIG_Y,TF_OPTION},
#line 16 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str29, T_MENU, TF_COMMAND},
@@ -234,10 +236,10 @@ kconf_id_lookup (register const char *str, register unsigned int len)
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31, T_SELECT, TF_COMMAND},
#line 21 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32, T_COMMENT, TF_COMMAND},
-#line 47 "scripts/kconfig/zconf.gperf"
+#line 48 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33, T_OPT_ENV, TF_OPTION},
{-1},
-#line 41 "scripts/kconfig/zconf.gperf"
+#line 42 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str35, T_RANGE, TF_COMMAND},
#line 19 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str36, T_CHOICE, TF_COMMAND},
@@ -247,7 +249,7 @@ kconf_id_lookup (register const char *str, register unsigned int len)
{-1},
#line 18 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41, T_SOURCE, TF_COMMAND},
-#line 42 "scripts/kconfig/zconf.gperf"
+#line 43 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str42, T_VISIBLE, TF_COMMAND},
#line 38 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str43, T_TYPE, TF_COMMAND, S_HEX},
@@ -256,7 +258,9 @@ kconf_id_lookup (register const char *str, register unsigned int len)
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46, T_CONFIG, TF_COMMAND},
#line 35 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str47, T_TYPE, TF_COMMAND, S_BOOLEAN},
- {-1}, {-1}, {-1},
+ {-1}, {-1},
+#line 41 "scripts/kconfig/zconf.gperf"
+ {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str50, T_IMPLY, TF_COMMAND},
#line 39 "scripts/kconfig/zconf.gperf"
{(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str51, T_TYPE, TF_COMMAND, S_STRING},
{-1}, {-1},
@@ -289,5 +293,5 @@ kconf_id_lookup (register const char *str, register unsigned int len)
}
return 0;
}
-#line 49 "scripts/kconfig/zconf.gperf"
+#line 50 "scripts/kconfig/zconf.gperf"
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index 7a4d658c2066..65b7515a577c 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -1,19 +1,19 @@
-/* A Bison parser, made by GNU Bison 2.5.1. */
+/* A Bison parser, made by GNU Bison 3.0.4. */
/* Bison implementation for Yacc-like parsers in C
-
- Copyright (C) 1984, 1989-1990, 2000-2012 Free Software Foundation, Inc.
-
+
+ Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
+
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
@@ -26,7 +26,7 @@
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
-
+
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
@@ -44,7 +44,7 @@
#define YYBISON 1
/* Bison version. */
-#define YYBISON_VERSION "2.5.1"
+#define YYBISON_VERSION "3.0.4"
/* Skeleton name. */
#define YYSKELETON_NAME "yacc.c"
@@ -58,18 +58,16 @@
/* Pull parsers. */
#define YYPULL 1
-/* Using locations. */
-#define YYLSP_NEEDED 0
/* Substitute the variable and function names. */
#define yyparse zconfparse
#define yylex zconflex
#define yyerror zconferror
-#define yylval zconflval
-#define yychar zconfchar
#define yydebug zconfdebug
#define yynerrs zconfnerrs
+#define yylval zconflval
+#define yychar zconfchar
/* Copy the first part of user declarations. */
@@ -108,19 +106,14 @@ static struct menu *current_menu, *current_entry;
-# ifndef YY_NULL
+# ifndef YY_NULLPTR
# if defined __cplusplus && 201103L <= __cplusplus
-# define YY_NULL nullptr
+# define YY_NULLPTR nullptr
# else
-# define YY_NULL 0
+# define YY_NULLPTR 0
# endif
# endif
-/* Enabling traces. */
-#ifndef YYDEBUG
-# define YYDEBUG 1
-#endif
-
/* Enabling verbose error messages. */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
@@ -129,62 +122,65 @@ static struct menu *current_menu, *current_entry;
# define YYERROR_VERBOSE 0
#endif
-/* Enabling the token table. */
-#ifndef YYTOKEN_TABLE
-# define YYTOKEN_TABLE 0
-#endif
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int zconfdebug;
+#endif
-/* Tokens. */
+/* Token type. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
- /* Put the tokens into the symbol table, so that GDB and other debuggers
- know about them. */
- enum yytokentype {
- T_MAINMENU = 258,
- T_MENU = 259,
- T_ENDMENU = 260,
- T_SOURCE = 261,
- T_CHOICE = 262,
- T_ENDCHOICE = 263,
- T_COMMENT = 264,
- T_CONFIG = 265,
- T_MENUCONFIG = 266,
- T_HELP = 267,
- T_HELPTEXT = 268,
- T_IF = 269,
- T_ENDIF = 270,
- T_DEPENDS = 271,
- T_OPTIONAL = 272,
- T_PROMPT = 273,
- T_TYPE = 274,
- T_DEFAULT = 275,
- T_SELECT = 276,
- T_RANGE = 277,
- T_VISIBLE = 278,
- T_OPTION = 279,
- T_ON = 280,
- T_WORD = 281,
- T_WORD_QUOTE = 282,
- T_UNEQUAL = 283,
- T_LESS = 284,
- T_LESS_EQUAL = 285,
- T_GREATER = 286,
- T_GREATER_EQUAL = 287,
- T_CLOSE_PAREN = 288,
- T_OPEN_PAREN = 289,
- T_EOL = 290,
- T_OR = 291,
- T_AND = 292,
- T_EQUAL = 293,
- T_NOT = 294
- };
+ enum yytokentype
+ {
+ T_MAINMENU = 258,
+ T_MENU = 259,
+ T_ENDMENU = 260,
+ T_SOURCE = 261,
+ T_CHOICE = 262,
+ T_ENDCHOICE = 263,
+ T_COMMENT = 264,
+ T_CONFIG = 265,
+ T_MENUCONFIG = 266,
+ T_HELP = 267,
+ T_HELPTEXT = 268,
+ T_IF = 269,
+ T_ENDIF = 270,
+ T_DEPENDS = 271,
+ T_OPTIONAL = 272,
+ T_PROMPT = 273,
+ T_TYPE = 274,
+ T_DEFAULT = 275,
+ T_SELECT = 276,
+ T_IMPLY = 277,
+ T_RANGE = 278,
+ T_VISIBLE = 279,
+ T_OPTION = 280,
+ T_ON = 281,
+ T_WORD = 282,
+ T_WORD_QUOTE = 283,
+ T_UNEQUAL = 284,
+ T_LESS = 285,
+ T_LESS_EQUAL = 286,
+ T_GREATER = 287,
+ T_GREATER_EQUAL = 288,
+ T_CLOSE_PAREN = 289,
+ T_OPEN_PAREN = 290,
+ T_EOL = 291,
+ T_OR = 292,
+ T_AND = 293,
+ T_EQUAL = 294,
+ T_NOT = 295
+ };
#endif
-
-
+/* Value type. */
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
-typedef union YYSTYPE
+
+union YYSTYPE
{
@@ -196,14 +192,20 @@ typedef union YYSTYPE
const struct kconf_id *id;
+};
-} YYSTYPE;
+typedef union YYSTYPE YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
-# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
#endif
+extern YYSTYPE zconflval;
+
+int zconfparse (void);
+
+
+
/* Copy the second part of user declarations. */
@@ -224,11 +226,8 @@ typedef unsigned char yytype_uint8;
#ifdef YYTYPE_INT8
typedef YYTYPE_INT8 yytype_int8;
-#elif (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-typedef signed char yytype_int8;
#else
-typedef short int yytype_int8;
+typedef signed char yytype_int8;
#endif
#ifdef YYTYPE_UINT16
@@ -248,8 +247,7 @@ typedef short int yytype_int16;
# define YYSIZE_T __SIZE_TYPE__
# elif defined size_t
# define YYSIZE_T size_t
-# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
+# elif ! defined YYSIZE_T
# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
# define YYSIZE_T size_t
# else
@@ -263,38 +261,67 @@ typedef short int yytype_int16;
# if defined YYENABLE_NLS && YYENABLE_NLS
# if ENABLE_NLS
# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
-# define YY_(msgid) dgettext ("bison-runtime", msgid)
+# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
# endif
# endif
# ifndef YY_
-# define YY_(msgid) msgid
+# define YY_(Msgid) Msgid
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE
+# if (defined __GNUC__ \
+ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \
+ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C
+# define YY_ATTRIBUTE(Spec) __attribute__(Spec)
+# else
+# define YY_ATTRIBUTE(Spec) /* empty */
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_PURE
+# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__))
+#endif
+
+#ifndef YY_ATTRIBUTE_UNUSED
+# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__))
+#endif
+
+#if !defined _Noreturn \
+ && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112)
+# if defined _MSC_VER && 1200 <= _MSC_VER
+# define _Noreturn __declspec (noreturn)
+# else
+# define _Noreturn YY_ATTRIBUTE ((__noreturn__))
# endif
#endif
/* Suppress unused-variable warnings by "using" E. */
#if ! defined lint || defined __GNUC__
-# define YYUSE(e) ((void) (e))
+# define YYUSE(E) ((void) (E))
#else
-# define YYUSE(e) /* empty */
+# define YYUSE(E) /* empty */
#endif
-/* Identity function, used to suppress warnings about constant conditions. */
-#ifndef lint
-# define YYID(n) (n)
-#else
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-static int
-YYID (int yyi)
+#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
+/* Suppress an incorrect diagnostic about yylval being uninitialized. */
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\
+ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
+ _Pragma ("GCC diagnostic pop")
#else
-static int
-YYID (yyi)
- int yyi;
+# define YY_INITIAL_VALUE(Value) Value
#endif
-{
- return yyi;
-}
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
+#endif
+
#if ! defined yyoverflow || YYERROR_VERBOSE
@@ -313,8 +340,7 @@ YYID (yyi)
# define alloca _alloca
# else
# define YYSTACK_ALLOC alloca
-# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
/* Use EXIT_SUCCESS as a witness for stdlib.h. */
# ifndef EXIT_SUCCESS
@@ -326,8 +352,8 @@ YYID (yyi)
# endif
# ifdef YYSTACK_ALLOC
- /* Pacify GCC's `empty if-body' warning. */
-# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0))
+ /* Pacify GCC's 'empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
# ifndef YYSTACK_ALLOC_MAXIMUM
/* The OS might guarantee only one guard page at the bottom of the stack,
and a page size can be as small as 4096 bytes. So we cannot safely
@@ -343,7 +369,7 @@ YYID (yyi)
# endif
# if (defined __cplusplus && ! defined EXIT_SUCCESS \
&& ! ((defined YYMALLOC || defined malloc) \
- && (defined YYFREE || defined free)))
+ && (defined YYFREE || defined free)))
# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
# ifndef EXIT_SUCCESS
# define EXIT_SUCCESS 0
@@ -351,15 +377,13 @@ YYID (yyi)
# endif
# ifndef YYMALLOC
# define YYMALLOC malloc
-# if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
+# if ! defined malloc && ! defined EXIT_SUCCESS
void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
# ifndef YYFREE
# define YYFREE free
-# if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
+# if ! defined free && ! defined EXIT_SUCCESS
void free (void *); /* INFRINGES ON USER NAME SPACE */
# endif
# endif
@@ -369,7 +393,7 @@ void free (void *); /* INFRINGES ON USER NAME SPACE */
#if (! defined yyoverflow \
&& (! defined __cplusplus \
- || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
/* A type that is properly aligned for any stack member. */
union yyalloc
@@ -394,16 +418,16 @@ union yyalloc
elements in the stack, and YYPTR gives the new location of the
stack. Advance YYPTR to a properly aligned location for the next
stack. */
-# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
- do \
- { \
- YYSIZE_T yynewbytes; \
- YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
- Stack = &yyptr->Stack_alloc; \
- yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
- yyptr += yynewbytes / sizeof (*yyptr); \
- } \
- while (YYID (0))
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYSIZE_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / sizeof (*yyptr); \
+ } \
+ while (0)
#endif
@@ -422,7 +446,7 @@ union yyalloc
for (yyi = 0; yyi < (Count); yyi++) \
(Dst)[yyi] = (Src)[yyi]; \
} \
- while (YYID (0))
+ while (0)
# endif
# endif
#endif /* !YYCOPY_NEEDED */
@@ -430,25 +454,27 @@ union yyalloc
/* YYFINAL -- State number of the termination state. */
#define YYFINAL 11
/* YYLAST -- Last index in YYTABLE. */
-#define YYLAST 298
+#define YYLAST 301
/* YYNTOKENS -- Number of terminals. */
-#define YYNTOKENS 40
+#define YYNTOKENS 41
/* YYNNTS -- Number of nonterminals. */
#define YYNNTS 50
/* YYNRULES -- Number of rules. */
-#define YYNRULES 122
-/* YYNRULES -- Number of states. */
-#define YYNSTATES 199
+#define YYNRULES 124
+/* YYNSTATES -- Number of states. */
+#define YYNSTATES 204
-/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
+/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned
+ by yylex, with out-of-bounds checking. */
#define YYUNDEFTOK 2
-#define YYMAXUTOK 294
+#define YYMAXUTOK 295
-#define YYTRANSLATE(YYX) \
+#define YYTRANSLATE(YYX) \
((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
-/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */
+/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex, without out-of-bounds checking. */
static const yytype_uint8 yytranslate[] =
{
0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
@@ -480,90 +506,30 @@ static const yytype_uint8 yytranslate[] =
5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
- 35, 36, 37, 38, 39
+ 35, 36, 37, 38, 39, 40
};
#if YYDEBUG
-/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in
- YYRHS. */
-static const yytype_uint16 yyprhs[] =
-{
- 0, 0, 3, 6, 8, 11, 13, 14, 17, 20,
- 23, 26, 31, 36, 40, 42, 44, 46, 48, 50,
- 52, 54, 56, 58, 60, 62, 64, 66, 68, 72,
- 75, 79, 82, 86, 89, 90, 93, 96, 99, 102,
- 105, 108, 112, 117, 122, 127, 133, 137, 138, 142,
- 143, 146, 150, 153, 155, 159, 160, 163, 166, 169,
- 172, 175, 180, 184, 187, 192, 193, 196, 200, 202,
- 206, 207, 210, 213, 216, 220, 224, 228, 230, 234,
- 235, 238, 241, 244, 248, 252, 255, 258, 261, 262,
- 265, 268, 271, 276, 277, 280, 283, 286, 287, 290,
- 292, 294, 297, 300, 303, 305, 308, 309, 312, 314,
- 318, 322, 326, 330, 334, 338, 342, 345, 349, 353,
- 355, 357, 358
-};
-
-/* YYRHS -- A `-1'-separated list of the rules' RHS. */
-static const yytype_int8 yyrhs[] =
-{
- 41, 0, -1, 85, 42, -1, 42, -1, 67, 43,
- -1, 43, -1, -1, 43, 45, -1, 43, 59, -1,
- 43, 71, -1, 43, 84, -1, 43, 26, 1, 35,
- -1, 43, 44, 1, 35, -1, 43, 1, 35, -1,
- 16, -1, 18, -1, 19, -1, 21, -1, 17, -1,
- 22, -1, 20, -1, 23, -1, 35, -1, 65, -1,
- 75, -1, 48, -1, 50, -1, 73, -1, 26, 1,
- 35, -1, 1, 35, -1, 10, 26, 35, -1, 47,
- 51, -1, 11, 26, 35, -1, 49, 51, -1, -1,
- 51, 52, -1, 51, 53, -1, 51, 79, -1, 51,
- 77, -1, 51, 46, -1, 51, 35, -1, 19, 82,
- 35, -1, 18, 83, 86, 35, -1, 20, 87, 86,
- 35, -1, 21, 26, 86, 35, -1, 22, 88, 88,
- 86, 35, -1, 24, 54, 35, -1, -1, 54, 26,
- 55, -1, -1, 38, 83, -1, 7, 89, 35, -1,
- 56, 60, -1, 84, -1, 57, 62, 58, -1, -1,
- 60, 61, -1, 60, 79, -1, 60, 77, -1, 60,
- 35, -1, 60, 46, -1, 18, 83, 86, 35, -1,
- 19, 82, 35, -1, 17, 35, -1, 20, 26, 86,
- 35, -1, -1, 62, 45, -1, 14, 87, 85, -1,
- 84, -1, 63, 66, 64, -1, -1, 66, 45, -1,
- 66, 71, -1, 66, 59, -1, 3, 83, 85, -1,
- 4, 83, 35, -1, 68, 80, 78, -1, 84, -1,
- 69, 72, 70, -1, -1, 72, 45, -1, 72, 71,
- -1, 72, 59, -1, 6, 83, 35, -1, 9, 83,
- 35, -1, 74, 78, -1, 12, 35, -1, 76, 13,
- -1, -1, 78, 79, -1, 78, 35, -1, 78, 46,
- -1, 16, 25, 87, 35, -1, -1, 80, 81, -1,
- 80, 35, -1, 23, 86, -1, -1, 83, 86, -1,
- 26, -1, 27, -1, 5, 35, -1, 8, 35, -1,
- 15, 35, -1, 35, -1, 85, 35, -1, -1, 14,
- 87, -1, 88, -1, 88, 29, 88, -1, 88, 30,
- 88, -1, 88, 31, 88, -1, 88, 32, 88, -1,
- 88, 38, 88, -1, 88, 28, 88, -1, 34, 87,
- 33, -1, 39, 87, -1, 87, 36, 87, -1, 87,
- 37, 87, -1, 26, -1, 27, -1, -1, 26, -1
-};
-
-/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
+ /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
static const yytype_uint16 yyrline[] =
{
- 0, 108, 108, 108, 110, 110, 112, 114, 115, 116,
- 117, 118, 119, 123, 127, 127, 127, 127, 127, 127,
- 127, 127, 131, 132, 133, 134, 135, 136, 140, 141,
- 147, 155, 161, 169, 179, 181, 182, 183, 184, 185,
- 186, 189, 197, 203, 213, 219, 225, 228, 230, 241,
- 242, 247, 256, 261, 269, 272, 274, 275, 276, 277,
- 278, 281, 287, 298, 304, 314, 316, 321, 329, 337,
- 340, 342, 343, 344, 349, 356, 363, 368, 376, 379,
- 381, 382, 383, 386, 394, 401, 408, 414, 421, 423,
- 424, 425, 428, 436, 438, 439, 442, 449, 451, 456,
- 457, 460, 461, 462, 466, 467, 470, 471, 474, 475,
- 476, 477, 478, 479, 480, 481, 482, 483, 484, 487,
- 488, 491, 492
+ 0, 109, 109, 109, 111, 111, 113, 115, 116, 117,
+ 118, 119, 120, 124, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 132, 133, 134, 135, 136, 137, 141,
+ 142, 148, 156, 162, 170, 180, 182, 183, 184, 185,
+ 186, 187, 190, 198, 204, 214, 220, 226, 232, 235,
+ 237, 248, 249, 254, 263, 268, 276, 279, 281, 282,
+ 283, 284, 285, 288, 294, 305, 311, 321, 323, 328,
+ 336, 344, 347, 349, 350, 351, 356, 363, 370, 375,
+ 383, 386, 388, 389, 390, 393, 401, 408, 415, 421,
+ 428, 430, 431, 432, 435, 443, 445, 446, 449, 456,
+ 458, 463, 464, 467, 468, 469, 473, 474, 477, 478,
+ 481, 482, 483, 484, 485, 486, 487, 488, 489, 490,
+ 491, 494, 495, 498, 499
};
#endif
-#if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE
+#if YYDEBUG || YYERROR_VERBOSE || 0
/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
First, the terminals, then, starting at YYNTOKENS, nonterminals. */
static const char *const yytname[] =
@@ -571,9 +537,9 @@ static const char *const yytname[] =
"$end", "error", "$undefined", "T_MAINMENU", "T_MENU", "T_ENDMENU",
"T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG",
"T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS",
- "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT", "T_SELECT", "T_RANGE",
- "T_VISIBLE", "T_OPTION", "T_ON", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL",
- "T_LESS", "T_LESS_EQUAL", "T_GREATER", "T_GREATER_EQUAL",
+ "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT", "T_SELECT", "T_IMPLY",
+ "T_RANGE", "T_VISIBLE", "T_OPTION", "T_ON", "T_WORD", "T_WORD_QUOTE",
+ "T_UNEQUAL", "T_LESS", "T_LESS_EQUAL", "T_GREATER", "T_GREATER_EQUAL",
"T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND", "T_EQUAL",
"T_NOT", "$accept", "input", "start", "stmt_list", "option_name",
"common_stmt", "option_error", "config_entry_start", "config_stmt",
@@ -585,260 +551,254 @@ static const char *const yytname[] =
"menu_entry", "menu_end", "menu_stmt", "menu_block", "source_stmt",
"comment", "comment_stmt", "help_start", "help", "depends_list",
"depends", "visibility_list", "visible", "prompt_stmt_opt", "prompt",
- "end", "nl", "if_expr", "expr", "symbol", "word_opt", YY_NULL
+ "end", "nl", "if_expr", "expr", "symbol", "word_opt", YY_NULLPTR
};
#endif
# ifdef YYPRINT
-/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to
- token YYLEX-NUM. */
+/* YYTOKNUM[NUM] -- (External) token number corresponding to the
+ (internal) symbol number NUM (which must be that of a token). */
static const yytype_uint16 yytoknum[] =
{
0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
- 285, 286, 287, 288, 289, 290, 291, 292, 293, 294
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295
};
# endif
-/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
-static const yytype_uint8 yyr1[] =
+#define YYPACT_NINF -92
+
+#define yypact_value_is_default(Yystate) \
+ (!!((Yystate) == (-92)))
+
+#define YYTABLE_NINF -88
+
+#define yytable_value_is_error(Yytable_value) \
+ 0
+
+ /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+static const yytype_int16 yypact[] =
{
- 0, 40, 41, 41, 42, 42, 43, 43, 43, 43,
- 43, 43, 43, 43, 44, 44, 44, 44, 44, 44,
- 44, 44, 45, 45, 45, 45, 45, 45, 46, 46,
- 47, 48, 49, 50, 51, 51, 51, 51, 51, 51,
- 51, 52, 52, 52, 52, 52, 53, 54, 54, 55,
- 55, 56, 57, 58, 59, 60, 60, 60, 60, 60,
- 60, 61, 61, 61, 61, 62, 62, 63, 64, 65,
- 66, 66, 66, 66, 67, 68, 69, 70, 71, 72,
- 72, 72, 72, 73, 74, 75, 76, 77, 78, 78,
- 78, 78, 79, 80, 80, 80, 81, 82, 82, 83,
- 83, 84, 84, 84, 85, 85, 86, 86, 87, 87,
- 87, 87, 87, 87, 87, 87, 87, 87, 87, 88,
- 88, 89, 89
+ 17, 41, -92, 15, -92, 150, -92, 19, -92, -92,
+ -13, -92, 28, 41, 38, 41, 50, 47, 41, 79,
+ 82, 44, 76, -92, -92, -92, -92, -92, -92, -92,
+ -92, -92, 118, -92, 129, -92, -92, -92, -92, -92,
+ -92, -92, -92, -92, -92, -92, -92, -92, -92, -92,
+ -92, -92, 184, -92, -92, 107, -92, 111, -92, 113,
+ -92, 116, -92, 139, 140, 151, -92, -92, 44, 44,
+ 142, 256, -92, 160, 173, 27, 117, 80, 51, 255,
+ -15, 255, 217, -92, -92, -92, -92, -92, -92, -8,
+ -92, 44, 44, 107, 87, 87, 87, 87, 87, 87,
+ -92, -92, 174, 176, 187, 41, 41, 44, 188, 189,
+ 87, -92, 213, -92, -92, -92, -92, 206, -92, -92,
+ 193, 41, 41, 203, -92, -92, -92, -92, -92, -92,
+ -92, -92, -92, -92, -92, -92, -92, 229, -92, 241,
+ -92, -92, -92, -92, -92, -92, -92, -92, -92, -92,
+ 216, -92, -92, -92, -92, -92, -92, -92, -92, -92,
+ 44, 229, 222, 229, 64, 229, 229, 87, 31, 231,
+ -92, -92, 229, 236, 229, 44, -92, 145, 242, -92,
+ -92, 243, 244, 245, 229, 251, -92, -92, 247, -92,
+ 257, 125, -92, -92, -92, -92, -92, 260, 41, -92,
+ -92, -92, -92, -92
};
-/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
-static const yytype_uint8 yyr2[] =
+ /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
+ Performed when YYTABLE does not specify something else to do. Zero
+ means the default is an error. */
+static const yytype_uint8 yydefact[] =
{
- 0, 2, 2, 1, 2, 1, 0, 2, 2, 2,
- 2, 4, 4, 3, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 3, 2,
- 3, 2, 3, 2, 0, 2, 2, 2, 2, 2,
- 2, 3, 4, 4, 4, 5, 3, 0, 3, 0,
- 2, 3, 2, 1, 3, 0, 2, 2, 2, 2,
- 2, 4, 3, 2, 4, 0, 2, 3, 1, 3,
- 0, 2, 2, 2, 3, 3, 3, 1, 3, 0,
- 2, 2, 2, 3, 3, 2, 2, 2, 0, 2,
- 2, 2, 4, 0, 2, 2, 2, 0, 2, 1,
- 1, 2, 2, 2, 1, 2, 0, 2, 1, 3,
- 3, 3, 3, 3, 3, 3, 2, 3, 3, 1,
- 1, 0, 1
+ 6, 0, 106, 0, 3, 0, 6, 6, 101, 102,
+ 0, 1, 0, 0, 0, 0, 123, 0, 0, 0,
+ 0, 0, 0, 14, 19, 15, 16, 21, 17, 18,
+ 20, 22, 0, 23, 0, 7, 35, 26, 35, 27,
+ 57, 67, 8, 72, 24, 95, 81, 9, 28, 90,
+ 25, 10, 0, 107, 2, 76, 13, 0, 103, 0,
+ 124, 0, 104, 0, 0, 0, 121, 122, 0, 0,
+ 0, 110, 105, 0, 0, 0, 0, 0, 0, 0,
+ 90, 0, 0, 77, 85, 53, 86, 31, 33, 0,
+ 118, 0, 0, 69, 0, 0, 0, 0, 0, 0,
+ 11, 12, 0, 0, 0, 0, 99, 0, 0, 0,
+ 0, 49, 0, 41, 40, 36, 37, 0, 39, 38,
+ 0, 0, 99, 0, 61, 62, 58, 60, 59, 68,
+ 56, 55, 73, 75, 71, 74, 70, 108, 97, 0,
+ 96, 82, 84, 80, 83, 79, 92, 93, 91, 117,
+ 119, 120, 116, 111, 112, 113, 114, 115, 30, 88,
+ 0, 108, 0, 108, 108, 108, 108, 0, 0, 0,
+ 89, 65, 108, 0, 108, 0, 98, 0, 0, 42,
+ 100, 0, 0, 0, 108, 51, 48, 29, 0, 64,
+ 0, 109, 94, 43, 44, 45, 46, 0, 0, 50,
+ 63, 66, 47, 52
};
-/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
- Performed when YYTABLE doesn't specify something else to do. Zero
- means the default is an error. */
-static const yytype_uint8 yydefact[] =
+ /* YYPGOTO[NTERM-NUM]. */
+static const yytype_int16 yypgoto[] =
{
- 6, 0, 104, 0, 3, 0, 6, 6, 99, 100,
- 0, 1, 0, 0, 0, 0, 121, 0, 0, 0,
- 0, 0, 0, 14, 18, 15, 16, 20, 17, 19,
- 21, 0, 22, 0, 7, 34, 25, 34, 26, 55,
- 65, 8, 70, 23, 93, 79, 9, 27, 88, 24,
- 10, 0, 105, 2, 74, 13, 0, 101, 0, 122,
- 0, 102, 0, 0, 0, 119, 120, 0, 0, 0,
- 108, 103, 0, 0, 0, 0, 0, 0, 0, 88,
- 0, 0, 75, 83, 51, 84, 30, 32, 0, 116,
- 0, 0, 67, 0, 0, 0, 0, 0, 0, 11,
- 12, 0, 0, 0, 0, 97, 0, 0, 0, 47,
- 0, 40, 39, 35, 36, 0, 38, 37, 0, 0,
- 97, 0, 59, 60, 56, 58, 57, 66, 54, 53,
- 71, 73, 69, 72, 68, 106, 95, 0, 94, 80,
- 82, 78, 81, 77, 90, 91, 89, 115, 117, 118,
- 114, 109, 110, 111, 112, 113, 29, 86, 0, 106,
- 0, 106, 106, 106, 0, 0, 0, 87, 63, 106,
- 0, 106, 0, 96, 0, 0, 41, 98, 0, 0,
- 106, 49, 46, 28, 0, 62, 0, 107, 92, 42,
- 43, 44, 0, 0, 48, 61, 64, 45, 50
+ -92, -92, 285, 291, -92, 32, -66, -92, -92, -92,
+ -92, 261, -92, -92, -92, -92, -92, -92, -92, 1,
+ -92, -92, -92, -92, -92, -92, -92, -92, -92, -92,
+ -92, 24, -92, -92, -92, -92, -92, 221, 220, -64,
+ -92, -92, 179, -1, 67, 0, 110, -67, -91, -92
};
-/* YYDEFGOTO[NTERM-NUM]. */
+ /* YYDEFGOTO[NTERM-NUM]. */
static const yytype_int16 yydefgoto[] =
{
- -1, 3, 4, 5, 33, 34, 112, 35, 36, 37,
- 38, 74, 113, 114, 165, 194, 39, 40, 128, 41,
- 76, 124, 77, 42, 132, 43, 78, 6, 44, 45,
- 141, 46, 80, 47, 48, 49, 115, 116, 81, 117,
- 79, 138, 160, 161, 50, 7, 173, 69, 70, 60
+ -1, 3, 4, 5, 34, 35, 114, 36, 37, 38,
+ 39, 75, 115, 116, 168, 199, 40, 41, 130, 42,
+ 77, 126, 78, 43, 134, 44, 79, 6, 45, 46,
+ 143, 47, 81, 48, 49, 50, 117, 118, 82, 119,
+ 80, 140, 162, 163, 51, 7, 176, 70, 71, 61
};
-/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
- STATE-NUM. */
-#define YYPACT_NINF -91
-static const yytype_int16 yypact[] =
+ /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule whose
+ number is the opposite. If YYTABLE_NINF, syntax error. */
+static const yytype_int16 yytable[] =
{
- 19, 37, -91, 13, -91, 79, -91, 20, -91, -91,
- -16, -91, 21, 37, 25, 37, 41, 36, 37, 78,
- 83, 31, 56, -91, -91, -91, -91, -91, -91, -91,
- -91, 116, -91, 127, -91, -91, -91, -91, -91, -91,
- -91, -91, -91, -91, -91, -91, -91, -91, -91, -91,
- -91, 147, -91, -91, 105, -91, 109, -91, 111, -91,
- 114, -91, 136, 137, 142, -91, -91, 31, 31, 76,
- 254, -91, 143, 146, 27, 115, 207, 258, 243, -14,
- 243, 179, -91, -91, -91, -91, -91, -91, -7, -91,
- 31, 31, 105, 51, 51, 51, 51, 51, 51, -91,
- -91, 156, 168, 181, 37, 37, 31, 178, 51, -91,
- 206, -91, -91, -91, -91, 196, -91, -91, 175, 37,
- 37, 185, -91, -91, -91, -91, -91, -91, -91, -91,
- -91, -91, -91, -91, -91, 214, -91, 230, -91, -91,
- -91, -91, -91, -91, -91, -91, -91, -91, 183, -91,
- -91, -91, -91, -91, -91, -91, -91, -91, 31, 214,
- 194, 214, 45, 214, 51, 26, 195, -91, -91, 214,
- 197, 214, 31, -91, 139, 208, -91, -91, 220, 224,
- 214, 222, -91, -91, 226, -91, 227, 123, -91, -91,
- -91, -91, 235, 37, -91, -91, -91, -91, -91
+ 10, 89, 90, 152, 153, 154, 155, 156, 157, 137,
+ 55, 125, 57, 128, 59, 11, 147, 63, 148, 167,
+ 1, 138, 1, 2, 150, 151, 149, -32, 102, 91,
+ 92, -32, -32, -32, -32, -32, -32, -32, -32, 103,
+ 164, -32, -32, 104, -32, 105, 106, 107, 108, 109,
+ 110, -32, 111, 2, 112, 53, 14, 15, 185, 17,
+ 18, 19, 20, 113, 56, 21, 22, 186, 8, 9,
+ 93, 66, 67, 147, 58, 148, 184, 60, 175, 68,
+ 133, 102, 142, 62, 69, -54, -54, 33, -54, -54,
+ -54, -54, 103, 177, -54, -54, 104, 120, 121, 122,
+ 123, 91, 92, 135, 161, 144, 64, 112, 191, 65,
+ 129, 132, 72, 141, 66, 67, 124, -34, 102, 73,
+ 172, -34, -34, -34, -34, -34, -34, -34, -34, 103,
+ 74, -34, -34, 104, -34, 105, 106, 107, 108, 109,
+ 110, -34, 111, 53, 112, 131, 136, 83, 145, 84,
+ -5, 12, 85, 113, 13, 14, 15, 16, 17, 18,
+ 19, 20, 91, 92, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 86, 87, 32, 2, 91,
+ 92, 192, 91, 92, -4, 12, 33, 88, 13, 14,
+ 15, 16, 17, 18, 19, 20, 100, 203, 21, 22,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31, 101,
+ 158, 32, 159, 160, 169, 165, 166, -87, 102, 170,
+ 33, -87, -87, -87, -87, -87, -87, -87, -87, 171,
+ 174, -87, -87, 104, -87, -87, -87, -87, -87, -87,
+ -87, -87, 102, 175, 112, -78, -78, -78, -78, -78,
+ -78, -78, -78, 146, 92, -78, -78, 104, 179, 13,
+ 14, 15, 16, 17, 18, 19, 20, 187, 112, 21,
+ 22, 178, 189, 180, 181, 182, 183, 146, 193, 194,
+ 195, 196, 188, 200, 190, 94, 95, 96, 97, 98,
+ 198, 33, 54, 201, 197, 99, 202, 52, 127, 76,
+ 139, 173
};
-/* YYPGOTO[NTERM-NUM]. */
-static const yytype_int16 yypgoto[] =
+static const yytype_uint8 yycheck[] =
{
- -91, -91, 264, 268, -91, 30, -65, -91, -91, -91,
- -91, 238, -91, -91, -91, -91, -91, -91, -91, -12,
- -91, -91, -91, -91, -91, -91, -91, -91, -91, -91,
- -91, -5, -91, -91, -91, -91, -91, 200, 209, -61,
- -91, -91, 170, -1, 65, 0, 118, -66, -90, -91
+ 1, 68, 69, 94, 95, 96, 97, 98, 99, 24,
+ 10, 77, 13, 77, 15, 0, 82, 18, 82, 110,
+ 3, 36, 3, 36, 91, 92, 34, 0, 1, 37,
+ 38, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 107, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, 25, 36, 27, 36, 5, 6, 27, 8,
+ 9, 10, 11, 36, 36, 14, 15, 36, 27, 28,
+ 70, 27, 28, 139, 36, 139, 167, 27, 14, 35,
+ 79, 1, 81, 36, 40, 5, 6, 36, 8, 9,
+ 10, 11, 12, 160, 14, 15, 16, 17, 18, 19,
+ 20, 37, 38, 79, 105, 81, 27, 27, 175, 27,
+ 78, 79, 36, 81, 27, 28, 36, 0, 1, 1,
+ 121, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 1, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, 25, 36, 27, 78, 79, 36, 81, 36,
+ 0, 1, 36, 36, 4, 5, 6, 7, 8, 9,
+ 10, 11, 37, 38, 14, 15, 16, 17, 18, 19,
+ 20, 21, 22, 23, 24, 36, 36, 27, 36, 37,
+ 38, 36, 37, 38, 0, 1, 36, 36, 4, 5,
+ 6, 7, 8, 9, 10, 11, 36, 198, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 36,
+ 36, 27, 36, 26, 1, 27, 27, 0, 1, 13,
+ 36, 4, 5, 6, 7, 8, 9, 10, 11, 36,
+ 27, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, 1, 14, 27, 4, 5, 6, 7, 8,
+ 9, 10, 11, 36, 38, 14, 15, 16, 36, 4,
+ 5, 6, 7, 8, 9, 10, 11, 36, 27, 14,
+ 15, 161, 36, 163, 164, 165, 166, 36, 36, 36,
+ 36, 36, 172, 36, 174, 29, 30, 31, 32, 33,
+ 39, 36, 7, 36, 184, 39, 36, 6, 77, 38,
+ 80, 122
};
-/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
- positive, shift that token. If negative, reduce the rule which
- number is the opposite. If YYTABLE_NINF, syntax error. */
-#define YYTABLE_NINF -86
-static const yytype_int16 yytable[] =
+ /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_uint8 yystos[] =
{
- 10, 88, 89, 150, 151, 152, 153, 154, 155, 135,
- 54, 123, 56, 11, 58, 126, 145, 62, 164, 2,
- 146, 136, 1, 1, 148, 149, 147, -31, 101, 90,
- 91, -31, -31, -31, -31, -31, -31, -31, -31, 102,
- 162, -31, -31, 103, -31, 104, 105, 106, 107, 108,
- -31, 109, 181, 110, 2, 52, 55, 65, 66, 172,
- 57, 182, 111, 8, 9, 67, 131, 59, 140, 92,
- 68, 61, 145, 133, 180, 142, 146, 65, 66, -5,
- 12, 90, 91, 13, 14, 15, 16, 17, 18, 19,
- 20, 71, 174, 21, 22, 23, 24, 25, 26, 27,
- 28, 29, 30, 159, 63, 31, 187, 127, 130, 64,
- 139, 2, 90, 91, 32, -33, 101, 72, 169, -33,
- -33, -33, -33, -33, -33, -33, -33, 102, 73, -33,
- -33, 103, -33, 104, 105, 106, 107, 108, -33, 109,
- 52, 110, 129, 134, 82, 143, 83, -4, 12, 84,
- 111, 13, 14, 15, 16, 17, 18, 19, 20, 90,
- 91, 21, 22, 23, 24, 25, 26, 27, 28, 29,
- 30, 85, 86, 31, 188, 90, 91, 87, 99, -85,
- 101, 100, 32, -85, -85, -85, -85, -85, -85, -85,
- -85, 156, 198, -85, -85, 103, -85, -85, -85, -85,
- -85, -85, -85, 157, 163, 110, 158, 166, 101, 167,
- 168, 171, -52, -52, 144, -52, -52, -52, -52, 102,
- 91, -52, -52, 103, 118, 119, 120, 121, 172, 176,
- 183, 101, 185, 110, -76, -76, -76, -76, -76, -76,
- -76, -76, 122, 189, -76, -76, 103, 13, 14, 15,
- 16, 17, 18, 19, 20, 190, 110, 21, 22, 191,
- 193, 195, 196, 14, 15, 144, 17, 18, 19, 20,
- 197, 53, 21, 22, 51, 75, 125, 175, 32, 177,
- 178, 179, 93, 94, 95, 96, 97, 184, 137, 186,
- 170, 0, 98, 32, 0, 0, 0, 0, 192
+ 0, 3, 36, 42, 43, 44, 68, 86, 27, 28,
+ 84, 0, 1, 4, 5, 6, 7, 8, 9, 10,
+ 11, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, 27, 36, 45, 46, 48, 49, 50, 51,
+ 57, 58, 60, 64, 66, 69, 70, 72, 74, 75,
+ 76, 85, 44, 36, 43, 86, 36, 84, 36, 84,
+ 27, 90, 36, 84, 27, 27, 27, 28, 35, 40,
+ 88, 89, 36, 1, 1, 52, 52, 61, 63, 67,
+ 81, 73, 79, 36, 36, 36, 36, 36, 36, 88,
+ 88, 37, 38, 86, 29, 30, 31, 32, 33, 39,
+ 36, 36, 1, 12, 16, 18, 19, 20, 21, 22,
+ 23, 25, 27, 36, 47, 53, 54, 77, 78, 80,
+ 17, 18, 19, 20, 36, 47, 62, 78, 80, 46,
+ 59, 85, 46, 60, 65, 72, 85, 24, 36, 79,
+ 82, 46, 60, 71, 72, 85, 36, 47, 80, 34,
+ 88, 88, 89, 89, 89, 89, 89, 89, 36, 36,
+ 26, 84, 83, 84, 88, 27, 27, 89, 55, 1,
+ 13, 36, 84, 83, 27, 14, 87, 88, 87, 36,
+ 87, 87, 87, 87, 89, 27, 36, 36, 87, 36,
+ 87, 88, 36, 36, 36, 36, 36, 87, 39, 56,
+ 36, 36, 36, 84
};
-#define yypact_value_is_default(yystate) \
- ((yystate) == (-91))
-
-#define yytable_value_is_error(yytable_value) \
- YYID (0)
-
-static const yytype_int16 yycheck[] =
+ /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_uint8 yyr1[] =
{
- 1, 67, 68, 93, 94, 95, 96, 97, 98, 23,
- 10, 76, 13, 0, 15, 76, 81, 18, 108, 35,
- 81, 35, 3, 3, 90, 91, 33, 0, 1, 36,
- 37, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 106, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- 23, 24, 26, 26, 35, 35, 35, 26, 27, 14,
- 35, 35, 35, 26, 27, 34, 78, 26, 80, 69,
- 39, 35, 137, 78, 164, 80, 137, 26, 27, 0,
- 1, 36, 37, 4, 5, 6, 7, 8, 9, 10,
- 11, 35, 158, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 104, 26, 26, 172, 77, 78, 26,
- 80, 35, 36, 37, 35, 0, 1, 1, 119, 4,
- 5, 6, 7, 8, 9, 10, 11, 12, 1, 14,
- 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
- 35, 26, 77, 78, 35, 80, 35, 0, 1, 35,
- 35, 4, 5, 6, 7, 8, 9, 10, 11, 36,
- 37, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- 23, 35, 35, 26, 35, 36, 37, 35, 35, 0,
- 1, 35, 35, 4, 5, 6, 7, 8, 9, 10,
- 11, 35, 193, 14, 15, 16, 17, 18, 19, 20,
- 21, 22, 23, 35, 26, 26, 25, 1, 1, 13,
- 35, 26, 5, 6, 35, 8, 9, 10, 11, 12,
- 37, 14, 15, 16, 17, 18, 19, 20, 14, 35,
- 35, 1, 35, 26, 4, 5, 6, 7, 8, 9,
- 10, 11, 35, 35, 14, 15, 16, 4, 5, 6,
- 7, 8, 9, 10, 11, 35, 26, 14, 15, 35,
- 38, 35, 35, 5, 6, 35, 8, 9, 10, 11,
- 35, 7, 14, 15, 6, 37, 76, 159, 35, 161,
- 162, 163, 28, 29, 30, 31, 32, 169, 79, 171,
- 120, -1, 38, 35, -1, -1, -1, -1, 180
+ 0, 41, 42, 42, 43, 43, 44, 44, 44, 44,
+ 44, 44, 44, 44, 45, 45, 45, 45, 45, 45,
+ 45, 45, 45, 46, 46, 46, 46, 46, 46, 47,
+ 47, 48, 49, 50, 51, 52, 52, 52, 52, 52,
+ 52, 52, 53, 53, 53, 53, 53, 53, 54, 55,
+ 55, 56, 56, 57, 58, 59, 60, 61, 61, 61,
+ 61, 61, 61, 62, 62, 62, 62, 63, 63, 64,
+ 65, 66, 67, 67, 67, 67, 68, 69, 70, 71,
+ 72, 73, 73, 73, 73, 74, 75, 76, 77, 78,
+ 79, 79, 79, 79, 80, 81, 81, 81, 82, 83,
+ 83, 84, 84, 85, 85, 85, 86, 86, 87, 87,
+ 88, 88, 88, 88, 88, 88, 88, 88, 88, 88,
+ 88, 89, 89, 90, 90
};
-/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
- symbol of state STATE-NUM. */
-static const yytype_uint8 yystos[] =
+ /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
+static const yytype_uint8 yyr2[] =
{
- 0, 3, 35, 41, 42, 43, 67, 85, 26, 27,
- 83, 0, 1, 4, 5, 6, 7, 8, 9, 10,
- 11, 14, 15, 16, 17, 18, 19, 20, 21, 22,
- 23, 26, 35, 44, 45, 47, 48, 49, 50, 56,
- 57, 59, 63, 65, 68, 69, 71, 73, 74, 75,
- 84, 43, 35, 42, 85, 35, 83, 35, 83, 26,
- 89, 35, 83, 26, 26, 26, 27, 34, 39, 87,
- 88, 35, 1, 1, 51, 51, 60, 62, 66, 80,
- 72, 78, 35, 35, 35, 35, 35, 35, 87, 87,
- 36, 37, 85, 28, 29, 30, 31, 32, 38, 35,
- 35, 1, 12, 16, 18, 19, 20, 21, 22, 24,
- 26, 35, 46, 52, 53, 76, 77, 79, 17, 18,
- 19, 20, 35, 46, 61, 77, 79, 45, 58, 84,
- 45, 59, 64, 71, 84, 23, 35, 78, 81, 45,
- 59, 70, 71, 84, 35, 46, 79, 33, 87, 87,
- 88, 88, 88, 88, 88, 88, 35, 35, 25, 83,
- 82, 83, 87, 26, 88, 54, 1, 13, 35, 83,
- 82, 26, 14, 86, 87, 86, 35, 86, 86, 86,
- 88, 26, 35, 35, 86, 35, 86, 87, 35, 35,
- 35, 35, 86, 38, 55, 35, 35, 35, 83
+ 0, 2, 2, 1, 2, 1, 0, 2, 2, 2,
+ 2, 4, 4, 3, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 3,
+ 2, 3, 2, 3, 2, 0, 2, 2, 2, 2,
+ 2, 2, 3, 4, 4, 4, 4, 5, 3, 0,
+ 3, 0, 2, 3, 2, 1, 3, 0, 2, 2,
+ 2, 2, 2, 4, 3, 2, 4, 0, 2, 3,
+ 1, 3, 0, 2, 2, 2, 3, 3, 3, 1,
+ 3, 0, 2, 2, 2, 3, 3, 2, 2, 2,
+ 0, 2, 2, 2, 4, 0, 2, 2, 2, 0,
+ 2, 1, 1, 2, 2, 2, 1, 2, 0, 2,
+ 1, 3, 3, 3, 3, 3, 3, 3, 2, 3,
+ 3, 1, 1, 0, 1
};
-#define yyerrok (yyerrstatus = 0)
-#define yyclearin (yychar = YYEMPTY)
-#define YYEMPTY (-2)
-#define YYEOF 0
-
-#define YYACCEPT goto yyacceptlab
-#define YYABORT goto yyabortlab
-#define YYERROR goto yyerrorlab
-
-
-/* Like YYERROR except do call yyerror. This remains here temporarily
- to ease the transition to the new meaning of YYERROR, for GCC.
- Once GCC version 2 has supplanted version 1, this can go. However,
- YYFAIL appears to be in use. Nevertheless, it is formally deprecated
- in Bison 2.4.2's NEWS entry, where a plan to phase it out is
- discussed. */
-
-#define YYFAIL goto yyerrlab
-#if defined YYFAIL
- /* This is here to suppress warnings from the GCC cpp's
- -Wunused-macros. Normally we don't worry about that warning, but
- some users do, and we want to make it easy for users to remove
- YYFAIL uses, which will produce warnings from Bison 2.5. */
-#endif
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
#define YYRECOVERING() (!!yyerrstatus)
@@ -855,55 +815,15 @@ do \
else \
{ \
yyerror (YY_("syntax error: cannot back up")); \
- YYERROR; \
- } \
-while (YYID (0))
-
-
-#define YYTERROR 1
-#define YYERRCODE 256
-
-
-/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
- If N is 0, then set CURRENT to the empty location which ends
- the previous symbol: RHS[0] (always defined). */
-
-#define YYRHSLOC(Rhs, K) ((Rhs)[K])
-#ifndef YYLLOC_DEFAULT
-# define YYLLOC_DEFAULT(Current, Rhs, N) \
- do \
- if (YYID (N)) \
- { \
- (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
- (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
- (Current).last_line = YYRHSLOC (Rhs, N).last_line; \
- (Current).last_column = YYRHSLOC (Rhs, N).last_column; \
- } \
- else \
- { \
- (Current).first_line = (Current).last_line = \
- YYRHSLOC (Rhs, 0).last_line; \
- (Current).first_column = (Current).last_column = \
- YYRHSLOC (Rhs, 0).last_column; \
- } \
- while (YYID (0))
-#endif
-
-
-/* This macro is provided for backward compatibility. */
-
-#ifndef YY_LOCATION_PRINT
-# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
-#endif
+ YYERROR; \
+ } \
+while (0)
+/* Error token number */
+#define YYTERROR 1
+#define YYERRCODE 256
-/* YYLEX -- calling `yylex' with the right arguments. */
-#ifdef YYLEX_PARAM
-# define YYLEX yylex (YYLEX_PARAM)
-#else
-# define YYLEX yylex ()
-#endif
/* Enable debugging if requested. */
#if YYDEBUG
@@ -913,40 +833,36 @@ while (YYID (0))
# define YYFPRINTF fprintf
# endif
-# define YYDPRINTF(Args) \
-do { \
- if (yydebug) \
- YYFPRINTF Args; \
-} while (YYID (0))
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (0)
+
+/* This macro is provided for backward compatibility. */
+#ifndef YY_LOCATION_PRINT
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+#endif
+
-# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
-do { \
- if (yydebug) \
- { \
- YYFPRINTF (stderr, "%s ", Title); \
- yy_symbol_print (stderr, \
- Type, Value); \
- YYFPRINTF (stderr, "\n"); \
- } \
-} while (YYID (0))
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (0)
-/*--------------------------------.
-| Print this symbol on YYOUTPUT. |
-`--------------------------------*/
+/*----------------------------------------.
+| Print this symbol's value on YYOUTPUT. |
+`----------------------------------------*/
-/*ARGSUSED*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
-#else
-static void
-yy_symbol_value_print (yyoutput, yytype, yyvaluep)
- FILE *yyoutput;
- int yytype;
- YYSTYPE const * const yyvaluep;
-#endif
{
FILE *yyo = yyoutput;
YYUSE (yyo);
@@ -955,14 +871,8 @@ yy_symbol_value_print (yyoutput, yytype, yyvaluep)
# ifdef YYPRINT
if (yytype < YYNTOKENS)
YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep);
-# else
- YYUSE (yyoutput);
# endif
- switch (yytype)
- {
- default:
- break;
- }
+ YYUSE (yytype);
}
@@ -970,22 +880,11 @@ yy_symbol_value_print (yyoutput, yytype, yyvaluep)
| Print this symbol on YYOUTPUT. |
`--------------------------------*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
-#else
-static void
-yy_symbol_print (yyoutput, yytype, yyvaluep)
- FILE *yyoutput;
- int yytype;
- YYSTYPE const * const yyvaluep;
-#endif
{
- if (yytype < YYNTOKENS)
- YYFPRINTF (yyoutput, "token %s (", yytname[yytype]);
- else
- YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]);
+ YYFPRINTF (yyoutput, "%s %s (",
+ yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
yy_symbol_value_print (yyoutput, yytype, yyvaluep);
YYFPRINTF (yyoutput, ")");
@@ -996,16 +895,8 @@ yy_symbol_print (yyoutput, yytype, yyvaluep)
| TOP (included). |
`------------------------------------------------------------------*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
-#else
-static void
-yy_stack_print (yybottom, yytop)
- yytype_int16 *yybottom;
- yytype_int16 *yytop;
-#endif
{
YYFPRINTF (stderr, "Stack now");
for (; yybottom <= yytop; yybottom++)
@@ -1016,49 +907,42 @@ yy_stack_print (yybottom, yytop)
YYFPRINTF (stderr, "\n");
}
-# define YY_STACK_PRINT(Bottom, Top) \
-do { \
- if (yydebug) \
- yy_stack_print ((Bottom), (Top)); \
-} while (YYID (0))
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (0)
/*------------------------------------------------.
| Report that the YYRULE is going to be reduced. |
`------------------------------------------------*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
static void
-yy_reduce_print (YYSTYPE *yyvsp, int yyrule)
-#else
-static void
-yy_reduce_print (yyvsp, yyrule)
- YYSTYPE *yyvsp;
- int yyrule;
-#endif
+yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule)
{
+ unsigned long int yylno = yyrline[yyrule];
int yynrhs = yyr2[yyrule];
int yyi;
- unsigned long int yylno = yyrline[yyrule];
YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
- yyrule - 1, yylno);
+ yyrule - 1, yylno);
/* The symbols being reduced. */
for (yyi = 0; yyi < yynrhs; yyi++)
{
YYFPRINTF (stderr, " $%d = ", yyi + 1);
- yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi],
- &(yyvsp[(yyi + 1) - (yynrhs)])
- );
+ yy_symbol_print (stderr,
+ yystos[yyssp[yyi + 1 - yynrhs]],
+ &(yyvsp[(yyi + 1) - (yynrhs)])
+ );
YYFPRINTF (stderr, "\n");
}
}
-# define YY_REDUCE_PRINT(Rule) \
-do { \
- if (yydebug) \
- yy_reduce_print (yyvsp, Rule); \
-} while (YYID (0))
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyssp, yyvsp, Rule); \
+} while (0)
/* Nonzero means print parse trace. It is left uninitialized so that
multiple parsers can coexist. */
@@ -1072,7 +956,7 @@ int yydebug;
/* YYINITDEPTH -- initial size of the parser's stacks. */
-#ifndef YYINITDEPTH
+#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif
@@ -1095,15 +979,8 @@ int yydebug;
# define yystrlen strlen
# else
/* Return the length of YYSTR. */
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
static YYSIZE_T
yystrlen (const char *yystr)
-#else
-static YYSIZE_T
-yystrlen (yystr)
- const char *yystr;
-#endif
{
YYSIZE_T yylen;
for (yylen = 0; yystr[yylen]; yylen++)
@@ -1119,16 +996,8 @@ yystrlen (yystr)
# else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
YYDEST. */
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
static char *
yystpcpy (char *yydest, const char *yysrc)
-#else
-static char *
-yystpcpy (yydest, yysrc)
- char *yydest;
- const char *yysrc;
-#endif
{
char *yyd = yydest;
const char *yys = yysrc;
@@ -1158,27 +1027,27 @@ yytnamerr (char *yyres, const char *yystr)
char const *yyp = yystr;
for (;;)
- switch (*++yyp)
- {
- case '\'':
- case ',':
- goto do_not_strip_quotes;
-
- case '\\':
- if (*++yyp != '\\')
- goto do_not_strip_quotes;
- /* Fall through. */
- default:
- if (yyres)
- yyres[yyn] = *yyp;
- yyn++;
- break;
-
- case '"':
- if (yyres)
- yyres[yyn] = '\0';
- return yyn;
- }
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ /* Fall through. */
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
do_not_strip_quotes: ;
}
@@ -1201,12 +1070,11 @@ static int
yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
yytype_int16 *yyssp, int yytoken)
{
- YYSIZE_T yysize0 = yytnamerr (YY_NULL, yytname[yytoken]);
+ YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
YYSIZE_T yysize = yysize0;
- YYSIZE_T yysize1;
enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
/* Internationalized format string. */
- const char *yyformat = YY_NULL;
+ const char *yyformat = YY_NULLPTR;
/* Arguments of yyformat. */
char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
/* Number of reported tokens (one for the "unexpected", one per
@@ -1214,10 +1082,6 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
int yycount = 0;
/* There are many possibilities here to consider:
- - Assume YYFAIL is not used. It's too flawed to consider. See
- <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
- for details. YYERROR is fine as it does not invoke this
- function.
- If this state is a consistent state with a default action, then
the only way this function was invoked is if the default action
is an error action. In that case, don't check for expected
@@ -1266,11 +1130,13 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
break;
}
yyarg[yycount++] = yytname[yyx];
- yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]);
- if (! (yysize <= yysize1
- && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
- return 2;
- yysize = yysize1;
+ {
+ YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
+ if (! (yysize <= yysize1
+ && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
+ }
}
}
}
@@ -1290,10 +1156,12 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
# undef YYCASE_
}
- yysize1 = yysize + yystrlen (yyformat);
- if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
- return 2;
- yysize = yysize1;
+ {
+ YYSIZE_T yysize1 = yysize + yystrlen (yyformat);
+ if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+ return 2;
+ yysize = yysize1;
+ }
if (*yymsg_alloc < yysize)
{
@@ -1330,78 +1198,58 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
| Release the memory associated to this symbol. |
`-----------------------------------------------*/
-/*ARGSUSED*/
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
-#else
-static void
-yydestruct (yymsg, yytype, yyvaluep)
- const char *yymsg;
- int yytype;
- YYSTYPE *yyvaluep;
-#endif
{
YYUSE (yyvaluep);
-
if (!yymsg)
yymsg = "Deleting";
YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
switch (yytype)
{
- case 57: /* "choice_entry" */
+ case 58: /* choice_entry */
- {
+ {
fprintf(stderr, "%s:%d: missing end statement for this entry\n",
- (yyvaluep->menu)->file->name, (yyvaluep->menu)->lineno);
- if (current_menu == (yyvaluep->menu))
+ ((*yyvaluep).menu)->file->name, ((*yyvaluep).menu)->lineno);
+ if (current_menu == ((*yyvaluep).menu))
menu_end_menu();
-};
+}
+
+ break;
- break;
- case 63: /* "if_entry" */
+ case 64: /* if_entry */
- {
+ {
fprintf(stderr, "%s:%d: missing end statement for this entry\n",
- (yyvaluep->menu)->file->name, (yyvaluep->menu)->lineno);
- if (current_menu == (yyvaluep->menu))
+ ((*yyvaluep).menu)->file->name, ((*yyvaluep).menu)->lineno);
+ if (current_menu == ((*yyvaluep).menu))
menu_end_menu();
-};
+}
- break;
- case 69: /* "menu_entry" */
+ break;
- {
+ case 70: /* menu_entry */
+
+ {
fprintf(stderr, "%s:%d: missing end statement for this entry\n",
- (yyvaluep->menu)->file->name, (yyvaluep->menu)->lineno);
- if (current_menu == (yyvaluep->menu))
+ ((*yyvaluep).menu)->file->name, ((*yyvaluep).menu)->lineno);
+ if (current_menu == ((*yyvaluep).menu))
menu_end_menu();
-};
+}
+
+ break;
- break;
default:
- break;
+ break;
}
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
}
-/* Prevent warnings from -Wmissing-prototypes. */
-#ifdef YYPARSE_PARAM
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void *YYPARSE_PARAM);
-#else
-int yyparse ();
-#endif
-#else /* ! YYPARSE_PARAM */
-#if defined __STDC__ || defined __cplusplus
-int yyparse (void);
-#else
-int yyparse ();
-#endif
-#endif /* ! YYPARSE_PARAM */
/* The lookahead symbol. */
@@ -1409,7 +1257,6 @@ int yychar;
/* The semantic value of the lookahead symbol. */
YYSTYPE yylval;
-
/* Number of syntax errors so far. */
int yynerrs;
@@ -1418,35 +1265,16 @@ int yynerrs;
| yyparse. |
`----------*/
-#ifdef YYPARSE_PARAM
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
-int
-yyparse (void *YYPARSE_PARAM)
-#else
-int
-yyparse (YYPARSE_PARAM)
- void *YYPARSE_PARAM;
-#endif
-#else /* ! YYPARSE_PARAM */
-#if (defined __STDC__ || defined __C99__FUNC__ \
- || defined __cplusplus || defined _MSC_VER)
int
yyparse (void)
-#else
-int
-yyparse ()
-
-#endif
-#endif
{
int yystate;
/* Number of tokens to shift before error messages enabled. */
int yyerrstatus;
/* The stacks and their tools:
- `yyss': related to states.
- `yyvs': related to semantic values.
+ 'yyss': related to states.
+ 'yyvs': related to semantic values.
Refer to the stacks through separate pointers, to allow yyoverflow
to reallocate them elsewhere. */
@@ -1466,7 +1294,7 @@ yyparse ()
int yyn;
int yyresult;
/* Lookahead token as an internal (translated) token number. */
- int yytoken;
+ int yytoken = 0;
/* The variables used to return semantic value and location from the
action routines. */
YYSTYPE yyval;
@@ -1484,9 +1312,8 @@ yyparse ()
Keep to zero when no symbol should be popped. */
int yylen = 0;
- yytoken = 0;
- yyss = yyssa;
- yyvs = yyvsa;
+ yyssp = yyss = yyssa;
+ yyvsp = yyvs = yyvsa;
yystacksize = YYINITDEPTH;
YYDPRINTF ((stderr, "Starting parse\n"));
@@ -1495,14 +1322,6 @@ yyparse ()
yyerrstatus = 0;
yynerrs = 0;
yychar = YYEMPTY; /* Cause a token to be read. */
-
- /* Initialize stack pointers.
- Waste one element of value and location stack
- so that they stay on the same level as the state stack.
- The wasted elements are never initialized. */
- yyssp = yyss;
- yyvsp = yyvs;
-
goto yysetstate;
/*------------------------------------------------------------.
@@ -1523,23 +1342,23 @@ yyparse ()
#ifdef yyoverflow
{
- /* Give user a chance to reallocate the stack. Use copies of
- these so that the &'s don't force the real ones into
- memory. */
- YYSTYPE *yyvs1 = yyvs;
- yytype_int16 *yyss1 = yyss;
-
- /* Each stack pointer address is followed by the size of the
- data in use in that stack, in bytes. This used to be a
- conditional around just the two extra args, but that might
- be undefined if yyoverflow is a macro. */
- yyoverflow (YY_("memory exhausted"),
- &yyss1, yysize * sizeof (*yyssp),
- &yyvs1, yysize * sizeof (*yyvsp),
- &yystacksize);
-
- yyss = yyss1;
- yyvs = yyvs1;
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ YYSTYPE *yyvs1 = yyvs;
+ yytype_int16 *yyss1 = yyss;
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * sizeof (*yyssp),
+ &yyvs1, yysize * sizeof (*yyvsp),
+ &yystacksize);
+
+ yyss = yyss1;
+ yyvs = yyvs1;
}
#else /* no yyoverflow */
# ifndef YYSTACK_RELOCATE
@@ -1547,22 +1366,22 @@ yyparse ()
# else
/* Extend the stack our own way. */
if (YYMAXDEPTH <= yystacksize)
- goto yyexhaustedlab;
+ goto yyexhaustedlab;
yystacksize *= 2;
if (YYMAXDEPTH < yystacksize)
- yystacksize = YYMAXDEPTH;
+ yystacksize = YYMAXDEPTH;
{
- yytype_int16 *yyss1 = yyss;
- union yyalloc *yyptr =
- (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
- if (! yyptr)
- goto yyexhaustedlab;
- YYSTACK_RELOCATE (yyss_alloc, yyss);
- YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+ yytype_int16 *yyss1 = yyss;
+ union yyalloc *yyptr =
+ (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss_alloc, yyss);
+ YYSTACK_RELOCATE (yyvs_alloc, yyvs);
# undef YYSTACK_RELOCATE
- if (yyss1 != yyssa)
- YYSTACK_FREE (yyss1);
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
}
# endif
#endif /* no yyoverflow */
@@ -1571,10 +1390,10 @@ yyparse ()
yyvsp = yyvs + yysize - 1;
YYDPRINTF ((stderr, "Stack size increased to %lu\n",
- (unsigned long int) yystacksize));
+ (unsigned long int) yystacksize));
if (yyss + yystacksize - 1 <= yyssp)
- YYABORT;
+ YYABORT;
}
YYDPRINTF ((stderr, "Entering state %d\n", yystate));
@@ -1603,7 +1422,7 @@ yybackup:
if (yychar == YYEMPTY)
{
YYDPRINTF ((stderr, "Reading a token: "));
- yychar = YYLEX;
+ yychar = yylex ();
}
if (yychar <= YYEOF)
@@ -1643,7 +1462,9 @@ yybackup:
yychar = YYEMPTY;
yystate = yyn;
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
goto yynewstate;
@@ -1666,7 +1487,7 @@ yyreduce:
yylen = yyr2[yyn];
/* If YYLEN is nonzero, implement the default value of the action:
- `$$ = $1'.
+ '$$ = $1'.
Otherwise, the following line sets YYVAL to garbage.
This behavior is undocumented and Bison
@@ -1682,64 +1503,73 @@ yyreduce:
case 10:
{ zconf_error("unexpected end statement"); }
+
break;
case 11:
- { zconf_error("unknown statement \"%s\"", (yyvsp[(2) - (4)].string)); }
+ { zconf_error("unknown statement \"%s\"", (yyvsp[-2].string)); }
+
break;
case 12:
{
- zconf_error("unexpected option \"%s\"", kconf_id_strings + (yyvsp[(2) - (4)].id)->name);
+ zconf_error("unexpected option \"%s\"", kconf_id_strings + (yyvsp[-2].id)->name);
}
+
break;
case 13:
{ zconf_error("invalid statement"); }
+
break;
- case 28:
+ case 29:
+
+ { zconf_error("unknown option \"%s\"", (yyvsp[-2].string)); }
- { zconf_error("unknown option \"%s\"", (yyvsp[(1) - (3)].string)); }
break;
- case 29:
+ case 30:
{ zconf_error("invalid option"); }
+
break;
- case 30:
+ case 31:
{
- struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), 0);
+ struct symbol *sym = sym_lookup((yyvsp[-1].string), 0);
sym->flags |= SYMBOL_OPTIONAL;
menu_add_entry(sym);
- printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
+ printd(DEBUG_PARSE, "%s:%d:config %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string));
}
+
break;
- case 31:
+ case 32:
{
menu_end_entry();
printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 32:
+ case 33:
{
- struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), 0);
+ struct symbol *sym = sym_lookup((yyvsp[-1].string), 0);
sym->flags |= SYMBOL_OPTIONAL;
menu_add_entry(sym);
- printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
+ printd(DEBUG_PARSE, "%s:%d:menuconfig %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string));
}
+
break;
- case 33:
+ case 34:
{
if (current_entry->prompt)
@@ -1749,352 +1579,410 @@ yyreduce:
menu_end_entry();
printd(DEBUG_PARSE, "%s:%d:endconfig\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 41:
+ case 42:
{
- menu_set_type((yyvsp[(1) - (3)].id)->stype);
+ menu_set_type((yyvsp[-2].id)->stype);
printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
zconf_curname(), zconf_lineno(),
- (yyvsp[(1) - (3)].id)->stype);
+ (yyvsp[-2].id)->stype);
}
+
break;
- case 42:
+ case 43:
{
- menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
+ menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr));
printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 43:
+ case 44:
{
- menu_add_expr(P_DEFAULT, (yyvsp[(2) - (4)].expr), (yyvsp[(3) - (4)].expr));
- if ((yyvsp[(1) - (4)].id)->stype != S_UNKNOWN)
- menu_set_type((yyvsp[(1) - (4)].id)->stype);
+ menu_add_expr(P_DEFAULT, (yyvsp[-2].expr), (yyvsp[-1].expr));
+ if ((yyvsp[-3].id)->stype != S_UNKNOWN)
+ menu_set_type((yyvsp[-3].id)->stype);
printd(DEBUG_PARSE, "%s:%d:default(%u)\n",
zconf_curname(), zconf_lineno(),
- (yyvsp[(1) - (4)].id)->stype);
+ (yyvsp[-3].id)->stype);
}
+
break;
- case 44:
+ case 45:
{
- menu_add_symbol(P_SELECT, sym_lookup((yyvsp[(2) - (4)].string), 0), (yyvsp[(3) - (4)].expr));
+ menu_add_symbol(P_SELECT, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr));
printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 45:
+ case 46:
{
- menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[(2) - (5)].symbol), (yyvsp[(3) - (5)].symbol)), (yyvsp[(4) - (5)].expr));
+ menu_add_symbol(P_IMPLY, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr));
+ printd(DEBUG_PARSE, "%s:%d:imply\n", zconf_curname(), zconf_lineno());
+}
+
+ break;
+
+ case 47:
+
+ {
+ menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[-3].symbol), (yyvsp[-2].symbol)), (yyvsp[-1].expr));
printd(DEBUG_PARSE, "%s:%d:range\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 48:
+ case 50:
{
- const struct kconf_id *id = kconf_id_lookup((yyvsp[(2) - (3)].string), strlen((yyvsp[(2) - (3)].string)));
+ const struct kconf_id *id = kconf_id_lookup((yyvsp[-1].string), strlen((yyvsp[-1].string)));
if (id && id->flags & TF_OPTION)
- menu_add_option(id->token, (yyvsp[(3) - (3)].string));
+ menu_add_option(id->token, (yyvsp[0].string));
else
- zconfprint("warning: ignoring unknown option %s", (yyvsp[(2) - (3)].string));
- free((yyvsp[(2) - (3)].string));
+ zconfprint("warning: ignoring unknown option %s", (yyvsp[-1].string));
+ free((yyvsp[-1].string));
}
+
break;
- case 49:
+ case 51:
{ (yyval.string) = NULL; }
+
break;
- case 50:
+ case 52:
+
+ { (yyval.string) = (yyvsp[0].string); }
- { (yyval.string) = (yyvsp[(2) - (2)].string); }
break;
- case 51:
+ case 53:
{
- struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), SYMBOL_CHOICE);
+ struct symbol *sym = sym_lookup((yyvsp[-1].string), SYMBOL_CHOICE);
sym->flags |= SYMBOL_AUTO;
menu_add_entry(sym);
menu_add_expr(P_CHOICE, NULL, NULL);
printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 52:
+ case 54:
{
(yyval.menu) = menu_add_menu();
}
+
break;
- case 53:
+ case 55:
{
- if (zconf_endtoken((yyvsp[(1) - (1)].id), T_CHOICE, T_ENDCHOICE)) {
+ if (zconf_endtoken((yyvsp[0].id), T_CHOICE, T_ENDCHOICE)) {
menu_end_menu();
printd(DEBUG_PARSE, "%s:%d:endchoice\n", zconf_curname(), zconf_lineno());
}
}
+
break;
- case 61:
+ case 63:
{
- menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
+ menu_add_prompt(P_PROMPT, (yyvsp[-2].string), (yyvsp[-1].expr));
printd(DEBUG_PARSE, "%s:%d:prompt\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 62:
+ case 64:
{
- if ((yyvsp[(1) - (3)].id)->stype == S_BOOLEAN || (yyvsp[(1) - (3)].id)->stype == S_TRISTATE) {
- menu_set_type((yyvsp[(1) - (3)].id)->stype);
+ if ((yyvsp[-2].id)->stype == S_BOOLEAN || (yyvsp[-2].id)->stype == S_TRISTATE) {
+ menu_set_type((yyvsp[-2].id)->stype);
printd(DEBUG_PARSE, "%s:%d:type(%u)\n",
zconf_curname(), zconf_lineno(),
- (yyvsp[(1) - (3)].id)->stype);
+ (yyvsp[-2].id)->stype);
} else
YYERROR;
}
+
break;
- case 63:
+ case 65:
{
current_entry->sym->flags |= SYMBOL_OPTIONAL;
printd(DEBUG_PARSE, "%s:%d:optional\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 64:
+ case 66:
{
- if ((yyvsp[(1) - (4)].id)->stype == S_UNKNOWN) {
- menu_add_symbol(P_DEFAULT, sym_lookup((yyvsp[(2) - (4)].string), 0), (yyvsp[(3) - (4)].expr));
+ if ((yyvsp[-3].id)->stype == S_UNKNOWN) {
+ menu_add_symbol(P_DEFAULT, sym_lookup((yyvsp[-2].string), 0), (yyvsp[-1].expr));
printd(DEBUG_PARSE, "%s:%d:default\n",
zconf_curname(), zconf_lineno());
} else
YYERROR;
}
+
break;
- case 67:
+ case 69:
{
printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
menu_add_entry(NULL);
- menu_add_dep((yyvsp[(2) - (3)].expr));
+ menu_add_dep((yyvsp[-1].expr));
(yyval.menu) = menu_add_menu();
}
+
break;
- case 68:
+ case 70:
{
- if (zconf_endtoken((yyvsp[(1) - (1)].id), T_IF, T_ENDIF)) {
+ if (zconf_endtoken((yyvsp[0].id), T_IF, T_ENDIF)) {
menu_end_menu();
printd(DEBUG_PARSE, "%s:%d:endif\n", zconf_curname(), zconf_lineno());
}
}
+
break;
- case 74:
+ case 76:
{
- menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL);
+ menu_add_prompt(P_MENU, (yyvsp[-1].string), NULL);
}
+
break;
- case 75:
+ case 77:
{
menu_add_entry(NULL);
- menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL);
+ menu_add_prompt(P_MENU, (yyvsp[-1].string), NULL);
printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 76:
+ case 78:
{
(yyval.menu) = menu_add_menu();
}
+
break;
- case 77:
+ case 79:
{
- if (zconf_endtoken((yyvsp[(1) - (1)].id), T_MENU, T_ENDMENU)) {
+ if (zconf_endtoken((yyvsp[0].id), T_MENU, T_ENDMENU)) {
menu_end_menu();
printd(DEBUG_PARSE, "%s:%d:endmenu\n", zconf_curname(), zconf_lineno());
}
}
+
break;
- case 83:
+ case 85:
{
- printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
- zconf_nextfile((yyvsp[(2) - (3)].string));
+ printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[-1].string));
+ zconf_nextfile((yyvsp[-1].string));
}
+
break;
- case 84:
+ case 86:
{
menu_add_entry(NULL);
- menu_add_prompt(P_COMMENT, (yyvsp[(2) - (3)].string), NULL);
+ menu_add_prompt(P_COMMENT, (yyvsp[-1].string), NULL);
printd(DEBUG_PARSE, "%s:%d:comment\n", zconf_curname(), zconf_lineno());
}
+
break;
- case 85:
+ case 87:
{
menu_end_entry();
}
+
break;
- case 86:
+ case 88:
{
printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
zconf_starthelp();
}
+
break;
- case 87:
+ case 89:
{
- current_entry->help = (yyvsp[(2) - (2)].string);
+ current_entry->help = (yyvsp[0].string);
}
+
break;
- case 92:
+ case 94:
{
- menu_add_dep((yyvsp[(3) - (4)].expr));
+ menu_add_dep((yyvsp[-1].expr));
printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
}
- break;
-
- case 96:
- {
- menu_add_visibility((yyvsp[(2) - (2)].expr));
-}
break;
case 98:
{
- menu_add_prompt(P_PROMPT, (yyvsp[(1) - (2)].string), (yyvsp[(2) - (2)].expr));
+ menu_add_visibility((yyvsp[0].expr));
}
- break;
- case 101:
-
- { (yyval.id) = (yyvsp[(1) - (2)].id); }
break;
- case 102:
+ case 100:
+
+ {
+ menu_add_prompt(P_PROMPT, (yyvsp[-1].string), (yyvsp[0].expr));
+}
- { (yyval.id) = (yyvsp[(1) - (2)].id); }
break;
case 103:
- { (yyval.id) = (yyvsp[(1) - (2)].id); }
+ { (yyval.id) = (yyvsp[-1].id); }
+
break;
- case 106:
+ case 104:
+
+ { (yyval.id) = (yyvsp[-1].id); }
- { (yyval.expr) = NULL; }
break;
- case 107:
+ case 105:
+
+ { (yyval.id) = (yyvsp[-1].id); }
- { (yyval.expr) = (yyvsp[(2) - (2)].expr); }
break;
case 108:
- { (yyval.expr) = expr_alloc_symbol((yyvsp[(1) - (1)].symbol)); }
+ { (yyval.expr) = NULL; }
+
break;
case 109:
- { (yyval.expr) = expr_alloc_comp(E_LTH, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+ { (yyval.expr) = (yyvsp[0].expr); }
+
break;
case 110:
- { (yyval.expr) = expr_alloc_comp(E_LEQ, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+ { (yyval.expr) = expr_alloc_symbol((yyvsp[0].symbol)); }
+
break;
case 111:
- { (yyval.expr) = expr_alloc_comp(E_GTH, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+ { (yyval.expr) = expr_alloc_comp(E_LTH, (yyvsp[-2].symbol), (yyvsp[0].symbol)); }
+
break;
case 112:
- { (yyval.expr) = expr_alloc_comp(E_GEQ, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+ { (yyval.expr) = expr_alloc_comp(E_LEQ, (yyvsp[-2].symbol), (yyvsp[0].symbol)); }
+
break;
case 113:
- { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+ { (yyval.expr) = expr_alloc_comp(E_GTH, (yyvsp[-2].symbol), (yyvsp[0].symbol)); }
+
break;
case 114:
- { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); }
+ { (yyval.expr) = expr_alloc_comp(E_GEQ, (yyvsp[-2].symbol), (yyvsp[0].symbol)); }
+
break;
case 115:
- { (yyval.expr) = (yyvsp[(2) - (3)].expr); }
+ { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); }
+
break;
case 116:
- { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); }
+ { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[-2].symbol), (yyvsp[0].symbol)); }
+
break;
case 117:
- { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
+ { (yyval.expr) = (yyvsp[-1].expr); }
+
break;
case 118:
- { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); }
+ { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[0].expr)); }
+
break;
case 119:
- { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); }
+ { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[-2].expr), (yyvsp[0].expr)); }
+
break;
case 120:
- { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); }
+ { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[-2].expr), (yyvsp[0].expr)); }
+
break;
case 121:
+ { (yyval.symbol) = sym_lookup((yyvsp[0].string), 0); free((yyvsp[0].string)); }
+
+ break;
+
+ case 122:
+
+ { (yyval.symbol) = sym_lookup((yyvsp[0].string), SYMBOL_CONST); free((yyvsp[0].string)); }
+
+ break;
+
+ case 123:
+
{ (yyval.string) = NULL; }
+
break;
@@ -2120,7 +2008,7 @@ yyreduce:
*++yyvsp = yyval;
- /* Now `shift' the result of the reduction. Determine what state
+ /* Now 'shift' the result of the reduction. Determine what state
that goes to, based on the state we popped back to and the rule
number reduced by. */
@@ -2135,9 +2023,9 @@ yyreduce:
goto yynewstate;
-/*------------------------------------.
-| yyerrlab -- here on detecting error |
-`------------------------------------*/
+/*--------------------------------------.
+| yyerrlab -- here on detecting error. |
+`--------------------------------------*/
yyerrlab:
/* Make sure we have latest lookahead translation. See comments at
user semantic actions for why this is necessary. */
@@ -2188,20 +2076,20 @@ yyerrlab:
if (yyerrstatus == 3)
{
/* If just tried and failed to reuse lookahead token after an
- error, discard it. */
+ error, discard it. */
if (yychar <= YYEOF)
- {
- /* Return failure if at end of input. */
- if (yychar == YYEOF)
- YYABORT;
- }
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
else
- {
- yydestruct ("Error: discarding",
- yytoken, &yylval);
- yychar = YYEMPTY;
- }
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval);
+ yychar = YYEMPTY;
+ }
}
/* Else will try to reuse lookahead token after shifting the error
@@ -2220,7 +2108,7 @@ yyerrorlab:
if (/*CONSTCOND*/ 0)
goto yyerrorlab;
- /* Do not reclaim the symbols of the rule which action triggered
+ /* Do not reclaim the symbols of the rule whose action triggered
this YYERROR. */
YYPOPSTACK (yylen);
yylen = 0;
@@ -2233,35 +2121,37 @@ yyerrorlab:
| yyerrlab1 -- common code for both syntax error and YYERROR. |
`-------------------------------------------------------------*/
yyerrlab1:
- yyerrstatus = 3; /* Each real token shifted decrements this. */
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
for (;;)
{
yyn = yypact[yystate];
if (!yypact_value_is_default (yyn))
- {
- yyn += YYTERROR;
- if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
- {
- yyn = yytable[yyn];
- if (0 < yyn)
- break;
- }
- }
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
/* Pop the current state because it cannot handle the error token. */
if (yyssp == yyss)
- YYABORT;
+ YYABORT;
yydestruct ("Error: popping",
- yystos[yystate], yyvsp);
+ yystos[yystate], yyvsp);
YYPOPSTACK (1);
yystate = *yyssp;
YY_STACK_PRINT (yyss, yyssp);
}
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
*++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
/* Shift the error token. */
@@ -2304,14 +2194,14 @@ yyreturn:
yydestruct ("Cleanup: discarding lookahead",
yytoken, &yylval);
}
- /* Do not reclaim the symbols of the rule which action triggered
+ /* Do not reclaim the symbols of the rule whose action triggered
this YYABORT or YYACCEPT. */
YYPOPSTACK (yylen);
YY_STACK_PRINT (yyss, yyssp);
while (yyssp != yyss)
{
yydestruct ("Cleanup: popping",
- yystos[*yyssp], yyvsp);
+ yystos[*yyssp], yyvsp);
YYPOPSTACK (1);
}
#ifndef yyoverflow
@@ -2322,14 +2212,11 @@ yyreturn:
if (yymsg != yymsgbuf)
YYSTACK_FREE (yymsg);
#endif
- /* Make sure YYID is used. */
- return YYID (yyresult);
+ return yyresult;
}
-
-
void conf_parse(const char *name)
{
struct symbol *sym;
@@ -2501,6 +2388,11 @@ static void print_symbol(FILE *out, struct menu *menu)
expr_fprint(prop->expr, out);
fputc('\n', out);
break;
+ case P_IMPLY:
+ fputs( " imply ", out);
+ expr_fprint(prop->expr, out);
+ fputc('\n', out);
+ break;
case P_RANGE:
fputs( " range ", out);
expr_fprint(prop->expr, out);
@@ -2577,4 +2469,3 @@ void zconfdump(FILE *out)
#include "expr.c"
#include "symbol.c"
#include "menu.c"
-
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 71bf8bff696a..001305fa080b 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE];
static struct menu *current_menu, *current_entry;
%}
-%expect 30
+%expect 32
%union
{
@@ -62,6 +62,7 @@ static struct menu *current_menu, *current_entry;
%token <id>T_TYPE
%token <id>T_DEFAULT
%token <id>T_SELECT
+%token <id>T_IMPLY
%token <id>T_RANGE
%token <id>T_VISIBLE
%token <id>T_OPTION
@@ -124,7 +125,7 @@ stmt_list:
;
option_name:
- T_DEPENDS | T_PROMPT | T_TYPE | T_SELECT | T_OPTIONAL | T_RANGE | T_DEFAULT | T_VISIBLE
+ T_DEPENDS | T_PROMPT | T_TYPE | T_SELECT | T_IMPLY | T_OPTIONAL | T_RANGE | T_DEFAULT | T_VISIBLE
;
common_stmt:
@@ -216,6 +217,12 @@ config_option: T_SELECT T_WORD if_expr T_EOL
printd(DEBUG_PARSE, "%s:%d:select\n", zconf_curname(), zconf_lineno());
};
+config_option: T_IMPLY T_WORD if_expr T_EOL
+{
+ menu_add_symbol(P_IMPLY, sym_lookup($2, 0), $3);
+ printd(DEBUG_PARSE, "%s:%d:imply\n", zconf_curname(), zconf_lineno());
+};
+
config_option: T_RANGE symbol symbol if_expr T_EOL
{
menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,$2, $3), $4);
@@ -664,6 +671,11 @@ static void print_symbol(FILE *out, struct menu *menu)
expr_fprint(prop->expr, out);
fputc('\n', out);
break;
+ case P_IMPLY:
+ fputs( " imply ", out);
+ expr_fprint(prop->expr, out);
+ fputc('\n', out);
+ break;
case P_RANGE:
fputs( " range ", out);
expr_fprint(prop->expr, out);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 8a90a0b8c1e1..5ce633aabce6 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2612,7 +2612,8 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
}
task_unlock(current);
- update_rlimit_cpu(current, rlimit(RLIMIT_CPU));
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ update_rlimit_cpu(current, rlimit(RLIMIT_CPU));
}
}
@@ -2642,9 +2643,11 @@ static void selinux_bprm_committed_creds(struct linux_binprm *bprm)
*/
rc = avc_has_perm(osid, sid, SECCLASS_PROCESS, PROCESS__SIGINH, NULL);
if (rc) {
- memset(&itimer, 0, sizeof itimer);
- for (i = 0; i < 3; i++)
- do_setitimer(i, &itimer, NULL);
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS)) {
+ memset(&itimer, 0, sizeof itimer);
+ for (i = 0; i < 3; i++)
+ do_setitimer(i, &itimer, NULL);
+ }
spin_lock_irq(&current->sighand->siglock);
if (!fatal_signal_pending(current)) {
flush_sigqueue(&current->pending);
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index a39629206864..cddd5d06e1cb 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 1dcb95e76f70..418871d02ebf 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -65,22 +65,22 @@ dep-cmd = $(if $(wildcard $(fixdep)),
printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \
cat $(depfile) >> $(dot-target).cmd; \
- printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
+ printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
###
# if_changed_dep - execute command if any prerequisite is newer than
# target, or command line has changed and update
# dependencies in the cmd file
if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \
- @set -e; \
- $(echo-cmd) $(cmd_$(1)) && $(dep-cmd))
+ @set -e; \
+ $(echo-cmd) $(cmd_$(1)) && $(dep-cmd))
# if_changed - execute command if any prerequisite is newer than
# target, or command line has changed
-if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
- @set -e; \
- $(echo-cmd) $(cmd_$(1)); \
- printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd)
+if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
+ @set -e; \
+ $(echo-cmd) $(cmd_$(1)); \
+ printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd)
###
# C flags to be used in rule definitions, includes:
@@ -89,10 +89,12 @@ if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
# - per target C flags
# - per object C flags
# - BUILD_STR macro to allow '-D"$(variable)"' constructs
-c_flags = -Wp,-MD,$(depfile),-MT,$@ $(CFLAGS) -D"BUILD_STR(s)=\#s" $(CFLAGS_$(basetarget).o) $(CFLAGS_$(obj))
-cxx_flags = -Wp,-MD,$(depfile),-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXXFLAGS_$(basetarget).o) $(CXXFLAGS_$(obj))
+c_flags_1 = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CFLAGS) -D"BUILD_STR(s)=\#s" $(CFLAGS_$(basetarget).o) $(CFLAGS_$(obj))
+c_flags_2 = $(filter-out $(CFLAGS_REMOVE_$(basetarget).o), $(c_flags_1))
+c_flags = $(filter-out $(CFLAGS_REMOVE_$(obj)), $(c_flags_2))
+cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXXFLAGS_$(basetarget).o) $(CXXFLAGS_$(obj))
###
## HOSTCC C flags
-host_c_flags = -Wp,-MD,$(depfile),-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj))
+host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj))
diff --git a/tools/build/Documentation/Build.txt b/tools/build/Documentation/Build.txt
index a47bffbae159..a22587475dbe 100644
--- a/tools/build/Documentation/Build.txt
+++ b/tools/build/Documentation/Build.txt
@@ -135,8 +135,10 @@ CFLAGS
It's possible to alter the standard object C flags in the following way:
- CFLAGS_perf.o += '...' - alters CFLAGS for perf.o object
- CFLAGS_gtk += '...' - alters CFLAGS for gtk build object
+ CFLAGS_perf.o += '...' - adds CFLAGS for perf.o object
+ CFLAGS_gtk += '...' - adds CFLAGS for gtk build object
+ CFLAGS_REMOVE_perf.o += '...' - removes CFLAGS for perf.o object
+ CFLAGS_REMOVE_gtk += '...' - removes CFLAGS for gtk build object
This C flags changes has the scope of the Build makefile they are defined in.
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index ae52e029dd22..e3fb5ecbdcb6 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -27,58 +27,58 @@ endef
# the rule that uses them - an example for that is the 'bionic'
# feature check. ]
#
-FEATURE_TESTS_BASIC := \
- backtrace \
- dwarf \
- dwarf_getlocations \
- fortify-source \
- sync-compare-and-swap \
- glibc \
- gtk2 \
- gtk2-infobar \
- libaudit \
- libbfd \
- libelf \
- libelf-getphdrnum \
- libelf-gelf_getnote \
- libelf-getshdrstrndx \
- libelf-mmap \
- libnuma \
- numa_num_possible_cpus \
- libperl \
- libpython \
- libpython-version \
- libslang \
- libcrypto \
- libunwind \
- libunwind-x86 \
- libunwind-x86_64 \
- libunwind-arm \
- libunwind-aarch64 \
- pthread-attr-setaffinity-np \
- stackprotector-all \
- timerfd \
- libdw-dwarf-unwind \
- zlib \
- lzma \
- get_cpuid \
- bpf \
- sdt
+FEATURE_TESTS_BASIC := \
+ backtrace \
+ dwarf \
+ dwarf_getlocations \
+ fortify-source \
+ sync-compare-and-swap \
+ glibc \
+ gtk2 \
+ gtk2-infobar \
+ libaudit \
+ libbfd \
+ libelf \
+ libelf-getphdrnum \
+ libelf-gelf_getnote \
+ libelf-getshdrstrndx \
+ libelf-mmap \
+ libnuma \
+ numa_num_possible_cpus \
+ libperl \
+ libpython \
+ libpython-version \
+ libslang \
+ libcrypto \
+ libunwind \
+ libunwind-x86 \
+ libunwind-x86_64 \
+ libunwind-arm \
+ libunwind-aarch64 \
+ pthread-attr-setaffinity-np \
+ stackprotector-all \
+ timerfd \
+ libdw-dwarf-unwind \
+ zlib \
+ lzma \
+ get_cpuid \
+ bpf \
+ sdt
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
-FEATURE_TESTS_EXTRA := \
- bionic \
- compile-32 \
- compile-x32 \
- cplus-demangle \
- hello \
- libbabeltrace \
- liberty \
- liberty-z \
- libunwind-debug-frame \
- libunwind-debug-frame-arm \
- libunwind-debug-frame-aarch64
+FEATURE_TESTS_EXTRA := \
+ bionic \
+ compile-32 \
+ compile-x32 \
+ cplus-demangle \
+ hello \
+ libbabeltrace \
+ liberty \
+ liberty-z \
+ libunwind-debug-frame \
+ libunwind-debug-frame-arm \
+ libunwind-debug-frame-aarch64
FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
@@ -86,26 +86,26 @@ ifeq ($(FEATURE_TESTS),all)
FEATURE_TESTS := $(FEATURE_TESTS_BASIC) $(FEATURE_TESTS_EXTRA)
endif
-FEATURE_DISPLAY ?= \
- dwarf \
- dwarf_getlocations \
- glibc \
- gtk2 \
- libaudit \
- libbfd \
- libelf \
- libnuma \
- numa_num_possible_cpus \
- libperl \
- libpython \
- libslang \
- libcrypto \
- libunwind \
- libdw-dwarf-unwind \
- zlib \
- lzma \
- get_cpuid \
- bpf
+FEATURE_DISPLAY ?= \
+ dwarf \
+ dwarf_getlocations \
+ glibc \
+ gtk2 \
+ libaudit \
+ libbfd \
+ libelf \
+ libnuma \
+ numa_num_possible_cpus \
+ libperl \
+ libpython \
+ libslang \
+ libcrypto \
+ libunwind \
+ libdw-dwarf-unwind \
+ zlib \
+ lzma \
+ get_cpuid \
+ bpf
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index ac9c477a2a48..b564a2eea039 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -1,59 +1,61 @@
-FILES= \
- test-all.bin \
- test-backtrace.bin \
- test-bionic.bin \
- test-dwarf.bin \
- test-dwarf_getlocations.bin \
- test-fortify-source.bin \
- test-sync-compare-and-swap.bin \
- test-glibc.bin \
- test-gtk2.bin \
- test-gtk2-infobar.bin \
- test-hello.bin \
- test-libaudit.bin \
- test-libbfd.bin \
- test-liberty.bin \
- test-liberty-z.bin \
- test-cplus-demangle.bin \
- test-libelf.bin \
- test-libelf-getphdrnum.bin \
- test-libelf-gelf_getnote.bin \
- test-libelf-getshdrstrndx.bin \
- test-libelf-mmap.bin \
- test-libnuma.bin \
- test-numa_num_possible_cpus.bin \
- test-libperl.bin \
- test-libpython.bin \
- test-libpython-version.bin \
- test-libslang.bin \
- test-libcrypto.bin \
- test-libunwind.bin \
- test-libunwind-debug-frame.bin \
- test-libunwind-x86.bin \
- test-libunwind-x86_64.bin \
- test-libunwind-arm.bin \
- test-libunwind-aarch64.bin \
- test-libunwind-debug-frame-arm.bin \
- test-libunwind-debug-frame-aarch64.bin \
- test-pthread-attr-setaffinity-np.bin \
- test-stackprotector-all.bin \
- test-timerfd.bin \
- test-libdw-dwarf-unwind.bin \
- test-libbabeltrace.bin \
- test-compile-32.bin \
- test-compile-x32.bin \
- test-zlib.bin \
- test-lzma.bin \
- test-bpf.bin \
- test-get_cpuid.bin \
- test-sdt.bin \
- test-cxx.bin
+FILES= \
+ test-all.bin \
+ test-backtrace.bin \
+ test-bionic.bin \
+ test-dwarf.bin \
+ test-dwarf_getlocations.bin \
+ test-fortify-source.bin \
+ test-sync-compare-and-swap.bin \
+ test-glibc.bin \
+ test-gtk2.bin \
+ test-gtk2-infobar.bin \
+ test-hello.bin \
+ test-libaudit.bin \
+ test-libbfd.bin \
+ test-liberty.bin \
+ test-liberty-z.bin \
+ test-cplus-demangle.bin \
+ test-libelf.bin \
+ test-libelf-getphdrnum.bin \
+ test-libelf-gelf_getnote.bin \
+ test-libelf-getshdrstrndx.bin \
+ test-libelf-mmap.bin \
+ test-libnuma.bin \
+ test-numa_num_possible_cpus.bin \
+ test-libperl.bin \
+ test-libpython.bin \
+ test-libpython-version.bin \
+ test-libslang.bin \
+ test-libcrypto.bin \
+ test-libunwind.bin \
+ test-libunwind-debug-frame.bin \
+ test-libunwind-x86.bin \
+ test-libunwind-x86_64.bin \
+ test-libunwind-arm.bin \
+ test-libunwind-aarch64.bin \
+ test-libunwind-debug-frame-arm.bin \
+ test-libunwind-debug-frame-aarch64.bin \
+ test-pthread-attr-setaffinity-np.bin \
+ test-stackprotector-all.bin \
+ test-timerfd.bin \
+ test-libdw-dwarf-unwind.bin \
+ test-libbabeltrace.bin \
+ test-compile-32.bin \
+ test-compile-x32.bin \
+ test-zlib.bin \
+ test-lzma.bin \
+ test-bpf.bin \
+ test-get_cpuid.bin \
+ test-sdt.bin \
+ test-cxx.bin \
+ test-jvmti.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
CC := $(CROSS_COMPILE)gcc -MD
CXX := $(CROSS_COMPILE)g++ -MD
PKG_CONFIG := $(CROSS_COMPILE)pkg-config
+LLVM_CONFIG ?= llvm-config
all: $(FILES)
@@ -225,6 +227,30 @@ $(OUTPUT)test-sdt.bin:
$(OUTPUT)test-cxx.bin:
$(BUILDXX) -std=gnu++11
+$(OUTPUT)test-jvmti.bin:
+ $(BUILD)
+
+$(OUTPUT)test-llvm.bin:
+ $(BUILDXX) -std=gnu++11 \
+ -I$(shell $(LLVM_CONFIG) --includedir) \
+ -L$(shell $(LLVM_CONFIG) --libdir) \
+ $(shell $(LLVM_CONFIG) --libs Core BPF) \
+ $(shell $(LLVM_CONFIG) --system-libs)
+
+$(OUTPUT)test-llvm-version.bin:
+ $(BUILDXX) -std=gnu++11 \
+ -I$(shell $(LLVM_CONFIG) --includedir)
+
+$(OUTPUT)test-clang.bin:
+ $(BUILDXX) -std=gnu++11 \
+ -I$(shell $(LLVM_CONFIG) --includedir) \
+ -L$(shell $(LLVM_CONFIG) --libdir) \
+ -Wl,--start-group -lclangBasic -lclangDriver \
+ -lclangFrontend -lclangEdit -lclangLex \
+ -lclangAST -Wl,--end-group \
+ $(shell $(LLVM_CONFIG) --libs Core option) \
+ $(shell $(LLVM_CONFIG) --system-libs)
+
-include $(OUTPUT)*.d
###############################
diff --git a/tools/build/feature/test-clang.cpp b/tools/build/feature/test-clang.cpp
new file mode 100644
index 000000000000..e23c1b1f1b91
--- /dev/null
+++ b/tools/build/feature/test-clang.cpp
@@ -0,0 +1,21 @@
+#include "clang/Basic/VirtualFileSystem.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace clang::driver;
+
+int main()
+{
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+
+ DiagnosticsEngine Diags(DiagID, &*DiagOpts);
+ Driver TheDriver("test", "bpf-pc-linux", Diags);
+
+ llvm::llvm_shutdown();
+ return 0;
+}
diff --git a/tools/build/feature/test-jvmti.c b/tools/build/feature/test-jvmti.c
new file mode 100644
index 000000000000..1c665f09b9d6
--- /dev/null
+++ b/tools/build/feature/test-jvmti.c
@@ -0,0 +1,13 @@
+#include <jvmti.h>
+#include <jvmticmlr.h>
+
+int main(void)
+{
+ JavaVM jvm __attribute__((unused));
+ jvmtiEventCallbacks cb __attribute__((unused));
+ jvmtiCapabilities caps __attribute__((unused));
+ jvmtiJlocationFormat format __attribute__((unused));
+ jvmtiEnv jvmti __attribute__((unused));
+
+ return 0;
+}
diff --git a/tools/build/feature/test-llvm-version.cpp b/tools/build/feature/test-llvm-version.cpp
new file mode 100644
index 000000000000..896d31724568
--- /dev/null
+++ b/tools/build/feature/test-llvm-version.cpp
@@ -0,0 +1,11 @@
+#include <cstdio>
+#include "llvm/Config/llvm-config.h"
+
+#define NUM_VERSION (((LLVM_VERSION_MAJOR) << 16) + (LLVM_VERSION_MINOR << 8) + LLVM_VERSION_PATCH)
+#define pass int main() {printf("%x\n", NUM_VERSION); return 0;}
+
+#if NUM_VERSION >= 0x030900
+pass
+#else
+# error This LLVM is not tested yet.
+#endif
diff --git a/tools/build/feature/test-llvm.cpp b/tools/build/feature/test-llvm.cpp
new file mode 100644
index 000000000000..455a332dc8a8
--- /dev/null
+++ b/tools/build/feature/test-llvm.cpp
@@ -0,0 +1,13 @@
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/raw_ostream.h"
+#define NUM_VERSION (((LLVM_VERSION_MAJOR) << 16) + (LLVM_VERSION_MINOR << 8) + LLVM_VERSION_PATCH)
+
+#if NUM_VERSION < 0x030900
+# error "LLVM version too low"
+#endif
+int main()
+{
+ llvm::errs() << "Hello World!\n";
+ llvm::llvm_shutdown();
+ return 0;
+}
diff --git a/tools/build/fixdep.c b/tools/build/fixdep.c
index 1521d36cef0d..734d1547cbae 100644
--- a/tools/build/fixdep.c
+++ b/tools/build/fixdep.c
@@ -49,7 +49,7 @@ static void parse_dep_file(void *map, size_t len)
char *end = m + len;
char *p;
char s[PATH_MAX];
- int is_target;
+ int is_target, has_target = 0;
int saw_any_target = 0;
int is_first_dep = 0;
@@ -67,7 +67,8 @@ static void parse_dep_file(void *map, size_t len)
if (is_target) {
/* The /next/ file is the first dependency */
is_first_dep = 1;
- } else {
+ has_target = 1;
+ } else if (has_target) {
/* Save this token/filename */
memcpy(s, m, p-m);
s[p - m] = 0;
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
index 653d1bad77de..0304600121da 100644
--- a/tools/include/asm-generic/bitops.h
+++ b/tools/include/asm-generic/bitops.h
@@ -13,6 +13,7 @@
*/
#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
diff --git a/tools/include/asm-generic/bitops/__ffz.h b/tools/include/asm-generic/bitops/__ffz.h
new file mode 100644
index 000000000000..6744bd4cdf46
--- /dev/null
+++ b/tools/include/asm-generic/bitops/__ffz.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
+#define _ASM_GENERIC_BITOPS_FFZ_H_
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x) __ffs(~(x))
+
+#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h
index 31f51547fcd4..5538ecdc964a 100644
--- a/tools/include/asm-generic/bitops/find.h
+++ b/tools/include/asm-generic/bitops/find.h
@@ -15,6 +15,21 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
size, unsigned long offset);
#endif
+#ifndef find_next_zero_bit
+
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next zero bit
+ * If no bits are zero, returns @size.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset);
+#endif
+
#ifndef find_first_bit
/**
@@ -30,4 +45,17 @@ extern unsigned long find_first_bit(const unsigned long *addr,
#endif /* find_first_bit */
+#ifndef find_first_zero_bit
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size);
+#endif
+
#endif /*_TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 49c929a104ee..fc446343ff41 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -39,6 +39,11 @@ extern unsigned long __sw_hweight64(__u64 w);
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = find_first_zero_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_from(bit, addr, size) \
for ((bit) = find_next_bit((addr), (size), (bit)); \
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index 58274382a616..8c27db0c5c08 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -72,4 +72,9 @@
#define MAP_HUGE_SHIFT 26
#define MAP_HUGE_MASK 0x3f
+#define PKEY_DISABLE_ACCESS 0x1
+#define PKEY_DISABLE_WRITE 0x2
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS |\
+ PKEY_DISABLE_WRITE)
+
#endif /* __ASM_GENERIC_MMAN_COMMON_H */
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 4212ed62235b..8143536b462a 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -110,3 +110,59 @@ int bpf_map_update_elem(int fd, void *key, void *value,
return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}
+
+int bpf_map_lookup_elem(int fd, void *key, void *value)
+{
+ union bpf_attr attr;
+
+ bzero(&attr, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+
+ return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+}
+
+int bpf_map_delete_elem(int fd, void *key)
+{
+ union bpf_attr attr;
+
+ bzero(&attr, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+
+ return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
+}
+
+int bpf_map_get_next_key(int fd, void *key, void *next_key)
+{
+ union bpf_attr attr;
+
+ bzero(&attr, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.next_key = ptr_to_u64(next_key);
+
+ return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
+}
+
+int bpf_obj_pin(int fd, const char *pathname)
+{
+ union bpf_attr attr;
+
+ bzero(&attr, sizeof(attr));
+ attr.pathname = ptr_to_u64((void *)pathname);
+ attr.bpf_fd = fd;
+
+ return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
+}
+
+int bpf_obj_get(const char *pathname)
+{
+ union bpf_attr attr;
+
+ bzero(&attr, sizeof(attr));
+ attr.pathname = ptr_to_u64((void *)pathname);
+
+ return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
+}
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index e8ba54087497..253c3dbb06b4 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -35,4 +35,11 @@ int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
int bpf_map_update_elem(int fd, void *key, void *value,
u64 flags);
+
+int bpf_map_lookup_elem(int fd, void *key, void *value);
+int bpf_map_delete_elem(int fd, void *key);
+int bpf_map_get_next_key(int fd, void *key, void *next_key);
+int bpf_obj_pin(int fd, const char *pathname);
+int bpf_obj_get(const char *pathname);
+
#endif
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index b699aea9a025..2e974593f3e8 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -185,6 +185,7 @@ struct bpf_program {
struct bpf_map {
int fd;
char *name;
+ size_t offset;
struct bpf_map_def def;
void *priv;
bpf_map_clear_priv_t clear_priv;
@@ -228,6 +229,10 @@ struct bpf_object {
* all objects.
*/
struct list_head list;
+
+ void *priv;
+ bpf_object_clear_priv_t clear_priv;
+
char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)
@@ -513,57 +518,106 @@ bpf_object__init_kversion(struct bpf_object *obj,
}
static int
-bpf_object__init_maps(struct bpf_object *obj, void *data,
- size_t size)
+bpf_object__validate_maps(struct bpf_object *obj)
{
- size_t nr_maps;
int i;
- nr_maps = size / sizeof(struct bpf_map_def);
- if (!data || !nr_maps) {
- pr_debug("%s doesn't need map definition\n",
- obj->path);
+ /*
+ * If there's only 1 map, the only error case should have been
+	 * caught in bpf_object__init_maps().
+ */
+ if (!obj->maps || !obj->nr_maps || (obj->nr_maps == 1))
return 0;
- }
- pr_debug("maps in %s: %zd bytes\n", obj->path, size);
+ for (i = 1; i < obj->nr_maps; i++) {
+ const struct bpf_map *a = &obj->maps[i - 1];
+ const struct bpf_map *b = &obj->maps[i];
- obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
- if (!obj->maps) {
- pr_warning("alloc maps for object failed\n");
- return -ENOMEM;
+ if (b->offset - a->offset < sizeof(struct bpf_map_def)) {
+ pr_warning("corrupted map section in %s: map \"%s\" too small\n",
+ obj->path, a->name);
+ return -EINVAL;
+ }
}
- obj->nr_maps = nr_maps;
-
- for (i = 0; i < nr_maps; i++) {
- struct bpf_map_def *def = &obj->maps[i].def;
+ return 0;
+}
- /*
- * fill all fd with -1 so won't close incorrect
- * fd (fd=0 is stdin) when failure (zclose won't close
- * negative fd)).
- */
- obj->maps[i].fd = -1;
+static int compare_bpf_map(const void *_a, const void *_b)
+{
+ const struct bpf_map *a = _a;
+ const struct bpf_map *b = _b;
- /* Save map definition into obj->maps */
- *def = ((struct bpf_map_def *)data)[i];
- }
- return 0;
+ return a->offset - b->offset;
}
static int
-bpf_object__init_maps_name(struct bpf_object *obj)
+bpf_object__init_maps(struct bpf_object *obj)
{
- int i;
+ int i, map_idx, nr_maps = 0;
+ Elf_Scn *scn;
+ Elf_Data *data;
Elf_Data *symbols = obj->efile.symbols;
- if (!symbols || obj->efile.maps_shndx < 0)
+ if (obj->efile.maps_shndx < 0)
+ return -EINVAL;
+ if (!symbols)
+ return -EINVAL;
+
+ scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
+ if (scn)
+ data = elf_getdata(scn, NULL);
+ if (!scn || !data) {
+ pr_warning("failed to get Elf_Data from map section %d\n",
+ obj->efile.maps_shndx);
return -EINVAL;
+ }
+ /*
+ * Count number of maps. Each map has a name.
+ * Array of maps is not supported: only the first element is
+ * considered.
+ *
+	 * TODO: Detect arrays of maps and report an error.
+ */
for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
GElf_Sym sym;
- size_t map_idx;
+
+ if (!gelf_getsym(symbols, i, &sym))
+ continue;
+ if (sym.st_shndx != obj->efile.maps_shndx)
+ continue;
+ nr_maps++;
+ }
+
+ /* Alloc obj->maps and fill nr_maps. */
+ pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
+ nr_maps, data->d_size);
+
+ if (!nr_maps)
+ return 0;
+
+ obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
+ if (!obj->maps) {
+ pr_warning("alloc maps for object failed\n");
+ return -ENOMEM;
+ }
+ obj->nr_maps = nr_maps;
+
+ /*
+	 * Fill all fds with -1 so we won't accidentally close fd 0
+	 * (stdin) on failure (zclose won't close a negative fd).
+ */
+ for (i = 0; i < nr_maps; i++)
+ obj->maps[i].fd = -1;
+
+ /*
+ * Fill obj->maps using data in "maps" section.
+ */
+ for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
+ GElf_Sym sym;
const char *map_name;
+ struct bpf_map_def *def;
if (!gelf_getsym(symbols, i, &sym))
continue;
@@ -573,21 +627,27 @@ bpf_object__init_maps_name(struct bpf_object *obj)
map_name = elf_strptr(obj->efile.elf,
obj->efile.strtabidx,
sym.st_name);
- map_idx = sym.st_value / sizeof(struct bpf_map_def);
- if (map_idx >= obj->nr_maps) {
- pr_warning("index of map \"%s\" is buggy: %zu > %zu\n",
- map_name, map_idx, obj->nr_maps);
- continue;
+ obj->maps[map_idx].offset = sym.st_value;
+ if (sym.st_value + sizeof(struct bpf_map_def) > data->d_size) {
+ pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
+ obj->path, map_name);
+ return -EINVAL;
}
+
obj->maps[map_idx].name = strdup(map_name);
if (!obj->maps[map_idx].name) {
pr_warning("failed to alloc map name\n");
return -ENOMEM;
}
- pr_debug("map %zu is \"%s\"\n", map_idx,
+ pr_debug("map %d is \"%s\"\n", map_idx,
obj->maps[map_idx].name);
+ def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
+ obj->maps[map_idx].def = *def;
+ map_idx++;
}
- return 0;
+
+ qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
+ return bpf_object__validate_maps(obj);
}
static int bpf_object__elf_collect(struct bpf_object *obj)
@@ -645,11 +705,9 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
err = bpf_object__init_kversion(obj,
data->d_buf,
data->d_size);
- else if (strcmp(name, "maps") == 0) {
- err = bpf_object__init_maps(obj, data->d_buf,
- data->d_size);
+ else if (strcmp(name, "maps") == 0)
obj->efile.maps_shndx = idx;
- } else if (sh.sh_type == SHT_SYMTAB) {
+ else if (sh.sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n",
obj->path);
@@ -698,7 +756,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
return LIBBPF_ERRNO__FORMAT;
}
if (obj->efile.maps_shndx >= 0)
- err = bpf_object__init_maps_name(obj);
+ err = bpf_object__init_maps(obj);
out:
return err;
}
@@ -807,7 +865,7 @@ bpf_object__create_maps(struct bpf_object *obj)
zclose(obj->maps[j].fd);
return err;
}
- pr_debug("create map: fd=%d\n", *pfd);
+ pr_debug("create map %s: fd=%d\n", obj->maps[i].name, *pfd);
}
return 0;
@@ -1175,6 +1233,9 @@ void bpf_object__close(struct bpf_object *obj)
if (!obj)
return;
+ if (obj->clear_priv)
+ obj->clear_priv(obj, obj->priv);
+
bpf_object__elf_finish(obj);
bpf_object__unload(obj);
@@ -1228,6 +1289,22 @@ unsigned int bpf_object__kversion(struct bpf_object *obj)
return obj ? obj->kern_version : 0;
}
+int bpf_object__set_priv(struct bpf_object *obj, void *priv,
+ bpf_object_clear_priv_t clear_priv)
+{
+ if (obj->priv && obj->clear_priv)
+ obj->clear_priv(obj, obj->priv);
+
+ obj->priv = priv;
+ obj->clear_priv = clear_priv;
+ return 0;
+}
+
+void *bpf_object__priv(struct bpf_object *obj)
+{
+ return obj ? obj->priv : ERR_PTR(-EINVAL);
+}
+
struct bpf_program *
bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
{
@@ -1447,3 +1524,15 @@ bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
}
return NULL;
}
+
+struct bpf_map *
+bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
+{
+ int i;
+
+ for (i = 0; i < obj->nr_maps; i++) {
+ if (obj->maps[i].offset == offset)
+ return &obj->maps[i];
+ }
+ return ERR_PTR(-ENOENT);
+}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index dd7a513efb10..a5a8b86a06fe 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -24,6 +24,7 @@
#include <stdio.h>
#include <stdbool.h>
#include <linux/err.h>
+#include <sys/types.h>	/* for size_t */
enum libbpf_errno {
__LIBBPF_ERRNO__START = 4000,
@@ -79,6 +80,11 @@ struct bpf_object *bpf_object__next(struct bpf_object *prev);
(pos) != NULL; \
(pos) = (tmp), (tmp) = bpf_object__next(tmp))
+typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
+int bpf_object__set_priv(struct bpf_object *obj, void *priv,
+ bpf_object_clear_priv_t clear_priv);
+void *bpf_object__priv(struct bpf_object *prog);
+
/* Accessors of bpf_program. */
struct bpf_program;
struct bpf_program *bpf_program__next(struct bpf_program *prog,
@@ -195,6 +201,13 @@ struct bpf_map;
struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name);
+/*
+ * Get bpf_map through the offset of corresponding struct bpf_map_def
+ * in the bpf object file.
+ */
+struct bpf_map *
+bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
+
struct bpf_map *
bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
#define bpf_map__for_each(pos, obj) \
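
A hedged sketch of the new bpf_object accessors, assuming the object was already opened with bpf_object__open(); the private-data struct and the offset value are illustrative:

  #include <stdlib.h>
  #include <linux/err.h>   /* IS_ERR(), via -I tools/include */
  #include "libbpf.h"      /* tools/lib/bpf */

  struct my_ctx { int refcount; };

  static void my_ctx_clear(struct bpf_object *obj, void *priv)
  {
      free(priv);   /* invoked from bpf_object__close() */
  }

  static void attach_ctx(struct bpf_object *obj)
  {
      struct my_ctx *ctx = calloc(1, sizeof(*ctx));
      struct bpf_map *map;

      if (!ctx)
          return;
      bpf_object__set_priv(obj, ctx, my_ctx_clear);

      /* look a map up by the offset of its bpf_map_def in the "maps" section */
      map = bpf_object__find_map_by_offset(obj, 0 /* illustrative offset */);
      if (IS_ERR(map))
          return;   /* not found: -ENOENT wrapped in an error pointer */
  }
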
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
index 9122a9e80046..6d8b8f22cf55 100644
--- a/tools/lib/find_bit.c
+++ b/tools/lib/find_bit.c
@@ -82,3 +82,28 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
return size;
}
#endif
+
+#ifndef find_first_zero_bit
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ unsigned long idx;
+
+ for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
+ if (addr[idx] != ~0UL)
+ return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
+ }
+
+ return size;
+}
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ return _find_next_bit(addr, size, offset, ~0UL);
+}
+#endif
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
index 981bb4481fd5..3284bb14ae78 100644
--- a/tools/lib/subcmd/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -314,12 +314,19 @@ static int get_value(struct parse_opt_ctx_t *p,
static int parse_short_opt(struct parse_opt_ctx_t *p, const struct option *options)
{
+retry:
for (; options->type != OPTION_END; options++) {
if (options->short_name == *p->opt) {
p->opt = p->opt[1] ? p->opt + 1 : NULL;
return get_value(p, options, OPT_SHORT);
}
}
+
+ if (options->parent) {
+ options = options->parent;
+ goto retry;
+ }
+
return -2;
}
@@ -333,6 +340,7 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg,
if (!arg_end)
arg_end = arg + strlen(arg);
+retry:
for (; options->type != OPTION_END; options++) {
const char *rest;
int flags = 0;
@@ -426,6 +434,12 @@ match:
}
if (abbrev_option)
return get_value(p, abbrev_option, abbrev_flags);
+
+ if (options->parent) {
+ options = options->parent;
+ goto retry;
+ }
+
return -2;
}
diff --git a/tools/lib/subcmd/parse-options.h b/tools/lib/subcmd/parse-options.h
index d60cab2726da..8866ac438b34 100644
--- a/tools/lib/subcmd/parse-options.h
+++ b/tools/lib/subcmd/parse-options.h
@@ -109,11 +109,13 @@ struct option {
intptr_t defval;
bool *set;
void *data;
+ const struct option *parent;
};
#define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v )
#define OPT_END() { .type = OPTION_END }
+#define OPT_PARENT(p) { .type = OPTION_END, .parent = (p) }
#define OPT_ARGUMENT(l, h) { .type = OPTION_ARGUMENT, .long_name = (l), .help = (h) }
#define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) }
#define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h), .defval = (b) }
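
With the new 'parent' pointer, parse_short_opt() and parse_long_opt() fall back to the parent table when an option is not found in the current one. A minimal sketch of a table pair using the new OPT_PARENT() terminator; the option names and variables are illustrative:

  #include <stdbool.h>
  #include <subcmd/parse-options.h>   /* via -I tools/lib */

  static int verbose;
  static bool dump_raw;

  /* options shared by every subcommand */
  static const struct option common_options[] = {
      OPT_INCR('v', "verbose", &verbose, "be more verbose"),
      OPT_END()
  };

  /* subcommand-specific table; unknown options retry in the parent table */
  static const struct option report_options[] = {
      OPT_BOOLEAN('D', "dump-raw-trace", &dump_raw, "dump raw trace"),
      OPT_PARENT(common_options)
  };
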
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 7851df1490e0..c76012ebdb9c 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -99,8 +99,6 @@ libdir_SQ = $(subst ','\'',$(libdir))
libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
-LIB_FILE = libtraceevent.a libtraceevent.so
-
CONFIG_INCLUDES =
CONFIG_LIBS =
CONFIG_FLAGS =
@@ -114,6 +112,9 @@ N =
EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
+LIB_TARGET = libtraceevent.a libtraceevent.so.$(EVENT_PARSE_VERSION)
+LIB_INSTALL = libtraceevent.a libtraceevent.so*
+
INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
# Set compile option CFLAGS
@@ -156,11 +157,11 @@ PLUGINS += plugin_cfg80211.so
PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS))
PLUGINS_IN := $(PLUGINS:.so=-in.o)
-TE_IN := $(OUTPUT)libtraceevent-in.o
-LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
+TE_IN := $(OUTPUT)libtraceevent-in.o
+LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
DYNAMIC_LIST_FILE := $(OUTPUT)libtraceevent-dynamic-list
-CMD_TARGETS = $(LIB_FILE) $(PLUGINS) $(DYNAMIC_LIST_FILE)
+CMD_TARGETS = $(LIB_TARGET) $(PLUGINS) $(DYNAMIC_LIST_FILE)
TARGETS = $(CMD_TARGETS)
@@ -171,8 +172,10 @@ all_cmd: $(CMD_TARGETS)
$(TE_IN): force
$(Q)$(MAKE) $(build)=libtraceevent
-$(OUTPUT)libtraceevent.so: $(TE_IN)
- $(QUIET_LINK)$(CC) --shared $^ -o $@
+$(OUTPUT)libtraceevent.so.$(EVENT_PARSE_VERSION): $(TE_IN)
+ $(QUIET_LINK)$(CC) --shared $^ -Wl,-soname,libtraceevent.so.$(EP_VERSION) -o $@
+ @ln -sf $(@F) $(OUTPUT)libtraceevent.so
+ @ln -sf $(@F) $(OUTPUT)libtraceevent.so.$(EP_VERSION)
$(OUTPUT)libtraceevent.a: $(TE_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
@@ -236,11 +239,15 @@ TAGS: force
find . -name '*.[ch]' | xargs etags \
--regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
define do_install
- if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
- fi; \
- $(INSTALL) $1 '$(DESTDIR_SQ)$2'
+ $(call do_install_mkdir,$2); \
+ $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
endef
define do_install_plugins
@@ -257,13 +264,20 @@ define do_generate_dynamic_list_file
endef
install_lib: all_cmd install_plugins
- $(call QUIET_INSTALL, $(LIB_FILE)) \
- $(call do_install,$(LIB_FILE),$(libdir_SQ))
+ $(call QUIET_INSTALL, $(LIB_TARGET)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIB_INSTALL) $(DESTDIR)$(libdir_SQ)
install_plugins: $(PLUGINS)
$(call QUIET_INSTALL, trace_plugins) \
$(call do_install_plugins, $(PLUGINS))
+install_headers:
+ $(call QUIET_INSTALL, headers) \
+ $(call do_install,event-parse.h,$(prefix)/include/traceevent,644); \
+ $(call do_install,event-utils.h,$(prefix)/include/traceevent,644); \
+ $(call do_install,kbuffer.h,$(prefix)/include/traceevent,644)
+
install: install_lib
clean:
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 664c90c8e22b..14a4f623c1a5 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -33,6 +33,7 @@
#include <stdint.h>
#include <limits.h>
#include <linux/string.h>
+#include <linux/time64.h>
#include <netinet/in.h>
#include "event-parse.h"
@@ -5191,11 +5192,11 @@ struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type
}
/**
- * pevent_data_pid - parse the PID from raw data
+ * pevent_data_pid - parse the PID from record
* @pevent: a handle to the pevent
* @rec: the record to parse
*
- * This returns the PID from a raw data.
+ * This returns the PID from a record.
*/
int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec)
{
@@ -5203,6 +5204,32 @@ int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec)
}
/**
+ * pevent_data_prempt_count - parse the preempt count from the record
+ * @pevent: a handle to the pevent
+ * @rec: the record to parse
+ *
+ * This returns the preempt count from a record.
+ */
+int pevent_data_prempt_count(struct pevent *pevent, struct pevent_record *rec)
+{
+ return parse_common_pc(pevent, rec->data);
+}
+
+/**
+ * pevent_data_flags - parse the latency flags from the record
+ * @pevent: a handle to the pevent
+ * @rec: the record to parse
+ *
+ * This returns the latency flags from a record.
+ *
+ * Use trace_flag_type enum for the flags (see event-parse.h).
+ */
+int pevent_data_flags(struct pevent *pevent, struct pevent_record *rec)
+{
+ return parse_common_flags(pevent, rec->data);
+}
+
+/**
* pevent_data_comm_from_pid - return the command line from PID
* @pevent: a handle to the pevent
* @pid: the PID of the task to search for
@@ -5424,8 +5451,8 @@ void pevent_print_event_time(struct pevent *pevent, struct trace_seq *s,
use_usec_format = is_timestamp_in_us(pevent->trace_clock,
use_trace_clock);
if (use_usec_format) {
- secs = record->ts / NSECS_PER_SEC;
- nsecs = record->ts - secs * NSECS_PER_SEC;
+ secs = record->ts / NSEC_PER_SEC;
+ nsecs = record->ts - secs * NSEC_PER_SEC;
}
if (pevent->latency_format) {
@@ -5437,10 +5464,10 @@ void pevent_print_event_time(struct pevent *pevent, struct trace_seq *s,
usecs = nsecs;
p = 9;
} else {
- usecs = (nsecs + 500) / NSECS_PER_USEC;
+ usecs = (nsecs + 500) / NSEC_PER_USEC;
/* To avoid usecs larger than 1 sec */
- if (usecs >= 1000000) {
- usecs -= 1000000;
+ if (usecs >= USEC_PER_SEC) {
+ usecs -= USEC_PER_SEC;
secs++;
}
p = 6;
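
A short sketch of the new record accessors, assuming a struct pevent handle and a pevent_record were already obtained from the usual trace-parsing path (both are assumptions here), with the flags interpreted via the trace_flag_type enum:

  #include <stdio.h>
  #include "event-parse.h"   /* tools/lib/traceevent */

  static void print_record_state(struct pevent *pevent, struct pevent_record *rec)
  {
      int pid   = pevent_data_pid(pevent, rec);
      int pc    = pevent_data_prempt_count(pevent, rec);
      int flags = pevent_data_flags(pevent, rec);

      printf("pid=%d preempt_count=%d irqs_off=%d\n",
             pid, pc, !!(flags & TRACE_FLAG_IRQS_OFF));
  }
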
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 9ffde377e89d..7aae746ec2fe 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -172,9 +172,6 @@ struct pevent_plugin_option {
#define PEVENT_PLUGIN_OPTIONS_NAME MAKE_STR(PEVENT_PLUGIN_OPTIONS)
#define PEVENT_PLUGIN_ALIAS_NAME MAKE_STR(PEVENT_PLUGIN_ALIAS)
-#define NSECS_PER_SEC 1000000000ULL
-#define NSECS_PER_USEC 1000ULL
-
enum format_flags {
FIELD_IS_ARRAY = 1,
FIELD_IS_POINTER = 2,
@@ -712,6 +709,8 @@ void pevent_data_lat_fmt(struct pevent *pevent,
int pevent_data_type(struct pevent *pevent, struct pevent_record *rec);
struct event_format *pevent_data_event_from_type(struct pevent *pevent, int type);
int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec);
+int pevent_data_prempt_count(struct pevent *pevent, struct pevent_record *rec);
+int pevent_data_flags(struct pevent *pevent, struct pevent_record *rec);
const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid);
struct cmdline;
struct cmdline *pevent_data_pid_from_comm(struct pevent *pevent, const char *comm,
diff --git a/tools/perf/Build b/tools/perf/Build
index a43fae7f439a..b12d5d1666e3 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -21,6 +21,7 @@ perf-y += builtin-inject.o
perf-y += builtin-mem.o
perf-y += builtin-data.o
perf-y += builtin-version.o
+perf-y += builtin-c2c.o
perf-$(CONFIG_AUDIT) += builtin-trace.o
perf-$(CONFIG_LIBELF) += builtin-probe.o
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
index c6c8318e38a2..b0b3007d3c9c 100644
--- a/tools/perf/Documentation/intel-pt.txt
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -550,6 +550,18 @@ Unless /proc/sys/kernel/perf_event_paranoid is set to -1, unprivileged users
have memory limits imposed upon them. That affects what buffer sizes they can
have as outlined above.
+The v4.2 kernel introduced support for a context switch metadata event,
+PERF_RECORD_SWITCH, which allows unprivileged users to see when their processes
+are scheduled out and in, just not by whom. That information is provided by
+PERF_RECORD_SWITCH_CPU_WIDE, which is only accessible in a system-wide context
+and in turn requires CAP_SYS_ADMIN.
+
+Please see commit 45ac1403f564 ("perf: Add PERF_RECORD_SWITCH to indicate
+context switches"), which introduced these metadata events, for further info.
+
+When working with kernels older than v4.2, take the following into account,
+as the sched:sched_switch tracepoint will be used to obtain this information:
+
Unless /proc/sys/kernel/perf_event_paranoid is set to -1, unprivileged users are
not permitted to use tracepoints which means there is insufficient side-band
information to decode Intel PT in per-cpu mode, and potentially workload-only
@@ -564,8 +576,11 @@ sched_switch tracepoint
-----------------------
The sched_switch tracepoint is used to provide side-band data for Intel PT
-decoding. sched_switch events are automatically added. e.g. the second event
-shown below
+decoding in kernels where the PERF_RECORD_SWITCH metadata event isn't
+available.
+
+The sched_switch events are automatically added. e.g. the second event shown
+below:
$ perf record -vv -e intel_pt//u uname
------------------------------------------------------------
diff --git a/tools/perf/Documentation/jitdump-specification.txt b/tools/perf/Documentation/jitdump-specification.txt
new file mode 100644
index 000000000000..4c62b0713651
--- /dev/null
+++ b/tools/perf/Documentation/jitdump-specification.txt
@@ -0,0 +1,170 @@
+JITDUMP specification version 2
+Last Revised: 09/15/2016
+Author: Stephane Eranian <eranian@gmail.com>
+
+--------------------------------------------------------
+| Revision | Date | Description |
+--------------------------------------------------------
+| 1 | 09/07/2016 | Initial revision |
+--------------------------------------------------------
+| 2 | 09/15/2016 | Add JIT_CODE_UNWINDING_INFO |
+--------------------------------------------------------
+
+
+I/ Introduction
+
+
+This document describes the jitdump file format. The file is generated by Just-In-Time compiler runtimes to save meta-data about the generated code, such as the address, size, and name of generated functions, the native code generated, and the source line information. The data may then be used by performance tools, such as Linux perf, to generate function- and assembly-level profiles.
+
+The format is not specific to any particular programming language. It can be extended as need be.
+
+The format of the file is binary. It is self-describing in terms of endianness and is portable across multiple processor architectures.
+
+
+II/ Overview of the format
+
+
+The format requires only sequential accesses, i.e., append-only mode. The file starts with a fixed-size file header describing the version of the specification and the endianness.
+
+The header is followed by a series of records, each starting with a fixed size header describing the type of record and its size. It is, itself, followed by the payload for the record. Records can have a variable size even for a given type.
+
+Each entry in the file is timestamped. All timestamps must use the same clock source. The CLOCK_MONOTONIC clock source is recommended.
+
+
+III/ Jitdump file header format
+
+Each jitdump file starts with a fixed size header containing the following fields in order:
+
+
+* uint32_t magic : a magic number tagging the file type. The value is 4 bytes long and represents the string "JiTD" in ASCII form. It is 0x4A695444 or 0x4454694a depending on the endianness. The field can be used to detect the endianness of the file
+* uint32_t version : a 4-byte value representing the format version. It is currently set to 2
+* uint32_t total_size: size in bytes of file header
+* uint32_t elf_mach : ELF architecture encoding (ELF e_machine value as specified in /usr/include/elf.h)
+* uint32_t pad1 : padding. Reserved for future use
+* uint32_t pid : JIT runtime process identification (OS specific)
+* uint64_t timestamp : timestamp of when the file was created
+* uint64_t flags : a bitmask of flags
+
+The flags currently defined are as follows:
+ * bit 0: JITDUMP_FLAGS_ARCH_TIMESTAMP : set if the jitdump file is using an architecture-specific timestamp clock source. For instance, on x86, one could use TSC directly
+
+IV/ Record header
+
+The file header is immediately followed by records. Each record starts with a fixed size header describing the record that follows.
+
+The record header is specified in order as follows:
+* uint32_t id : a value identifying the record type (see below)
+* uint32_t total_size: the size in bytes of the record including the header.
+* uint64_t timestamp : a timestamp of when the record was created.
+
+The following record types are defined:
+ * Value 0 : JIT_CODE_LOAD : record describing a jitted function
+ * Value 1 : JIT_CODE_MOVE : record describing an already jitted function which is moved
+ * Value 2 : JIT_CODE_DEBUG_INFO: record describing the debug information for a jitted function
+ * Value 3 : JIT_CODE_CLOSE : record marking the end of the jit runtime (optional)
+ * Value 4 : JIT_CODE_UNWINDING_INFO: record describing the unwinding information for a jitted function
+
+ The payload of the record must immediately follow the record header without padding.
+
+V/ JIT_CODE_LOAD record
+
+
+ The record has the following fields following the fixed-size record header in order:
+ * uint32_t pid: OS process id of the runtime generating the jitted code
+ * uint32_t tid: OS thread identification of the runtime thread generating the jitted code
+ * uint64_t vma: virtual address of jitted code start
+ * uint64_t code_addr: code start address for the jitted code. By default vma = code_addr
+ * uint64_t code_size: size in bytes of the generated jitted code
+ * uint64_t code_index: unique identifier for the jitted code (see below)
+ * char[n]: function name in ASCII including the null termination
+ * native code: raw byte encoding of the jitted code
+
+ The record header total_size field is inclusive of all components:
+ * record header
+ * fixed-sized fields
+ * function name string, including termination
+ * native code length
+ * record specific variable data (e.g., array of data entries)
+
+The code_index is used to uniquely identify each jitted function. The index can be a monotonically increasing 64-bit value. Each time a function is jitted it gets a new number. This value is used in case the code for a function is moved and avoids having to issue another JIT_CODE_LOAD record.
+
+The format supports empty functions with no native code.
+
+
+VI/ JIT_CODE_MOVE record
+
+ The record type is optional.
+
+ The record has the following fields following the fixed-size record header in order:
+ * uint32_t pid : OS process id of the runtime generating the jitted code
+ * uint32_t tid : OS thread identification of the runtime thread generating the jitted code
+ * uint64_t vma : new virtual address of jitted code start
+ * uint64_t old_code_addr: previous code address for the same function
+ * uint64_t new_code_addr: alternate new code start address for the jitted code. By default it should be equal to the vma address.
+ * uint64_t code_size : size in bytes of the jitted code
+ * uint64_t code_index : index referring to the JIT_CODE_LOAD code_index record of when the function was initially jitted
+
+
+The MOVE record can be used in case an already jitted function is simply moved by the runtime inside the code cache.
+
+The JIT_CODE_MOVE record cannot come before the JIT_CODE_LOAD record for the same function name. The function cannot have changed name, otherwise a new JIT_CODE_LOAD record must be emitted.
+
+The code size of the function cannot change.
+
+
+VII/ JIT_CODE_DEBUG_INFO record
+
+The record type is optional.
+
+The record contains source lines debug information, i.e., a way to map a code address back to a source line. This information may be used by the performance tool.
+
+The record has the following fields following the fixed-size record header in order:
+ * uint64_t code_addr: address of function for which the debug information is generated
+ * uint64_t nr_entry : number of debug entries for the function
+ * debug_entry[n]: array of nr_entry debug entries for the function
+
+The debug_entry describes the source line information. It is defined as follows in order:
+* uint64_t code_addr: address of function for which the debug information is generated
+* uint32_t line : source file line number (starting at 1)
+* uint32_t discrim : column discriminator, 0 is default
+* char name[n] : source file name in ASCII, including null termination
+
+The debug_entry entries are saved in sequence but given that they have variable sizes due to the file name string, they cannot be indexed directly.
+They need to be walked sequentially. The next debug_entry is found at sizeof(debug_entry) + strlen(name) + 1.
+
+IMPORTANT:
+ The JIT_CODE_DEBUG_INFO record for a given function must always be generated BEFORE the JIT_CODE_LOAD record for that function. This greatly simplifies parsing of the jitdump file.
+
+
+VIII/ JIT_CODE_CLOSE record
+
+
+The record type is optional.
+
+The record is used as a marker for the end of the jitted runtime. It can be replaced by the end of the file.
+
+The JIT_CODE_CLOSE record does not have any specific fields, the record header contains all the information needed.
+
+
+IX/ JIT_CODE_UNWINDING_INFO
+
+
+The record type is optional.
+
+The record is used to describe the unwinding information for a jitted function.
+
+The record has the following fields following the fixed-size record header in order:
+
+uint64_t unwind_data_size : the size in bytes of the unwinding data table at the end of the record
+uint64_t eh_frame_hdr_size : the size in bytes of the DWARF EH Frame Header at the start of the unwinding data table at the end of the record
+uint64_t mapped_size : the size of the unwinding data mapped in memory
+const char unwinding_data[n]: an array of unwinding data, consisting of the EH Frame Header, followed by the actual EH Frame
+
+
+The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
+
+
+The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
+
+
+NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
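
For readers who prefer code, a hedged C rendering of the two fixed-size headers described in sections III and IV above; the struct names are illustrative and not part of the specification:

  #include <stdint.h>

  struct jitdump_file_header {
      uint32_t magic;       /* "JiTD": 0x4A695444, also detects endianness */
      uint32_t version;     /* currently 2 */
      uint32_t total_size;  /* size in bytes of this header */
      uint32_t elf_mach;    /* ELF e_machine of the jitted code */
      uint32_t pad1;        /* reserved for future use */
      uint32_t pid;         /* JIT runtime process id */
      uint64_t timestamp;   /* file creation time */
      uint64_t flags;       /* bit 0: JITDUMP_FLAGS_ARCH_TIMESTAMP */
  };

  struct jitdump_record_header {
      uint32_t id;          /* JIT_CODE_LOAD, JIT_CODE_MOVE, ... */
      uint32_t total_size;  /* record size in bytes, header included */
      uint64_t timestamp;   /* record creation time */
  };
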
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
new file mode 100644
index 000000000000..3f06730c7f47
--- /dev/null
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -0,0 +1,290 @@
+perf-c2c(1)
+===========
+
+NAME
+----
+perf-c2c - Shared Data C2C/HITM Analyzer.
+
+SYNOPSIS
+--------
+[verse]
+'perf c2c record' [<options>] <command>
+'perf c2c record' [<options>] -- [<record command options>] <command>
+'perf c2c report' [<options>]
+
+DESCRIPTION
+-----------
+C2C stands for Cache To Cache.
+
+The perf c2c tool provides a means for Shared Data C2C/HITM analysis. It allows
+you to track down cacheline contentions.
+
+The tool is based on x86's load latency and precise store facility events
+provided by Intel CPUs. These events provide:
+ - memory address of the access
+ - type of the access (load and store details)
+ - latency (in cycles) of the load access
+
+The c2c tool provides a means to record this data and report back access
+details for the cachelines with the highest contention - the highest number of
+HITM accesses.
+
+The basic workflow with this tool follows the standard record/report pattern.
+Use the record command to record the event data and the report command to
+display it.
+
+
+RECORD OPTIONS
+--------------
+-e::
+--event=::
+ Select the PMU event. Use 'perf mem record -e list'
+ to list available events.
+
+-v::
+--verbose::
+ Be more verbose (show counter open errors, etc).
+
+-l::
+--ldlat::
+ Configure mem-loads latency.
+
+-k::
+--all-kernel::
+ Configure all used events to run in kernel space.
+
+-u::
+--all-user::
+ Configure all used events to run in user space.
+
+REPORT OPTIONS
+--------------
+-k::
+--vmlinux=<file>::
+ vmlinux pathname
+
+-v::
+--verbose::
+ Be more verbose (show counter open errors, etc).
+
+-i::
+--input::
+ Specify the input file to process.
+
+-N::
+--node-info::
+ Show extra node info in report (see NODE INFO section)
+
+-c::
+--coalesce::
+	Specify sorting fields for single cacheline display.
+	The following fields are available: tid,pid,iaddr,dso
+ (see COALESCE)
+
+-g::
+--call-graph::
+	Set up callchain parameters.
+ Please refer to perf-report man page for details.
+
+--stdio::
+ Force the stdio output (see STDIO OUTPUT)
+
+--stats::
+	Display only the statistics tables and force stdio mode.
+
+--full-symbols::
+ Display full length of symbols.
+
+--no-source::
+ Do not display Source:Line column.
+
+--show-all::
+	Show all captured HITM lines, regardless of the 0.0005% HITM limit.
+
+-f::
+--force::
+ Don't do ownership validation.
+
+-d::
+--display::
+	Switch the HITM type (rmt, lcl) to display and sort on. Total HITMs by default.
+
+C2C RECORD
+----------
+The perf c2c record command sets up options related to HITM cacheline analysis
+and calls the standard perf record command.
+
+The following perf record options are configured by default:
+(check perf record man page for details)
+
+ -W,-d,--sample-cpu
+
+Unless specified otherwise with the '-e' option, the following events are
+monitored by default:
+
+ cpu/mem-loads,ldlat=30/P
+ cpu/mem-stores/P
+
+Users can pass any 'perf record' option after the '--' separator, e.g. to
+enable callchains and system-wide monitoring:
+
+ $ perf c2c record -- -g -a
+
+Please check RECORD OPTIONS section for specific c2c record options.
+
+C2C REPORT
+----------
+The perf c2c report command displays shared data analysis. It comes in two
+display modes: stdio and tui (default).
+
+The report command workflow is as follows:
+ - sort all the data based on the cacheline address
+ - store access details for each cacheline
+ - sort all cachelines based on user settings
+ - display data
+
+In general perf report output consists of 2 basic views:
+ 1) most expensive cachelines list
+ 2) offsets details for each cacheline
+
+For each cacheline in the 1) list we display the following data:
+(Both stdio and TUI modes display the same fields)
+
+ Index
+ - zero based index to identify the cacheline
+
+ Cacheline
+ - cacheline address (hex number)
+
+ Total records
+   - sum of all cacheline accesses
+
+ Rmt/Lcl Hitm
+ - cacheline percentage of all Remote/Local HITM accesses
+
+ LLC Load Hitm - Total, Lcl, Rmt
+ - count of Total/Local/Remote load HITMs
+
+ Store Reference - Total, L1Hit, L1Miss
+ Total - all store accesses
+ L1Hit - store accesses that hit L1
+   L1Miss - store accesses that missed L1
+
+ Load Dram
+ - count of local and remote DRAM accesses
+
+ LLC Ld Miss
+ - count of all accesses that missed LLC
+
+ Total Loads
+ - sum of all load accesses
+
+ Core Load Hit - FB, L1, L2
+ - count of load hits in FB (Fill Buffer), L1 and L2 cache
+
+ LLC Load Hit - Llc, Rmt
+ - count of LLC and Remote load hits
+
+For each offset in the 2) list we display the following data:
+
+ HITM - Rmt, Lcl
+ - % of Remote/Local HITM accesses for given offset within cacheline
+
+ Store Refs - L1 Hit, L1 Miss
+ - % of store accesses that hit/missed L1 for given offset within cacheline
+
+ Data address - Offset
+ - offset address
+
+ Pid
+ - pid of the process responsible for the accesses
+
+ Tid
+ - tid of the process responsible for the accesses
+
+ Code address
+ - code address responsible for the accesses
+
+ cycles - rmt hitm, lcl hitm, load
+ - sum of cycles for given accesses - Remote/Local HITM and generic load
+
+ cpu cnt
+   - number of cpus that participated in the access
+
+ Symbol
+ - code symbol related to the 'Code address' value
+
+ Shared Object
+ - shared object name related to the 'Code address' value
+
+ Source:Line
+ - source information related to the 'Code address' value
+
+ Node
+   - nodes participating in the access (see NODE INFO section)
+
+NODE INFO
+---------
+The 'Node' field displays the nodes that access a given cacheline
+offset. Its output comes in 3 flavors:
+ - node IDs separated by ','
+ - node IDs with stats for each ID, in the following format:
+ Node{cpus %hitms %stores}
+ - node IDs with a list of affected CPUs in the following format:
+ Node{cpu list}
+
+Users can switch between the above flavors with the -N option or
+use the 'n' key to switch interactively in TUI mode.
+
+COALESCE
+--------
+Users can specify how to sort offsets for a cacheline.
+
+The following fields are available and govern the final
+set of output fields for the cacheline offsets output:
+
+ tid - coalesced by process TIDs
+ pid - coalesced by process PIDs
+ iaddr - coalesced by code address, following fields are displayed:
+ Code address, Code symbol, Shared Object, Source line
+ dso - coalesced by shared object
+
+By default the coalescing is set up with 'pid,tid,iaddr'.
+
+STDIO OUTPUT
+------------
+The stdio output displays data on standard output.
+
+The following tables are displayed:
+ Trace Event Information
+ - overall statistics of memory accesses
+
+ Global Shared Cache Line Event Information
+ - overall statistics on shared cachelines
+
+ Shared Data Cache Line Table
+ - list of most expensive cachelines
+
+ Shared Cache Line Distribution Pareto
+ - list of all accessed offsets for each cacheline
+
+TUI OUTPUT
+----------
+The TUI output provides an interactive interface to navigate
+through the cacheline list and to display offset details.
+
+For details please refer to the help window by pressing the '?' key.
+
+CREDITS
+-------
+Although Don Zickus, Dick Fowles and Joe Mario worked together
+to get this implemented, we got lots of early help from Arnaldo
+Carvalho de Melo, Stephane Eranian, Jiri Olsa and Andi Kleen.
+
+C2C BLOG
+--------
+Check Joe's blog on c2c tool for detailed use case explanation:
+ https://joemario.github.io/blog/2016/09/01/c2c-blog/
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-mem[1]
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index cb081ac59fd1..9365b75fd04f 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -8,6 +8,8 @@ perf-config - Get and set variables in a configuration file.
SYNOPSIS
--------
[verse]
+'perf config' [<file-option>] [section.name[=value] ...]
+or
'perf config' [<file-option>] -l | --list
DESCRIPTION
@@ -118,6 +120,39 @@ Given a $HOME/.perfconfig like this:
children = true
group = true
+You can hide the source code shown by the annotate feature by setting the config to true with
+
+ % perf config annotate.hide_src_code=true
+
+If you want to add or modify several config items, you can do it like this
+
+ % perf config ui.show-headers=false kmem.default=slab
+
+To modify the sort order of the report functionality in the user config file (i.e. `~/.perfconfig`), do
+
+ % perf config --user report sort-order=srcline
+
+To change colors of selected line to other foreground and background colors
+in system config file (i.e. `$(sysconf)/perfconfig`), do
+
+ % perf config --system colors.selected=yellow,green
+
+To query the record mode of call graph, do
+
+ % perf config call-graph.record-mode
+
+If you want to query multiple config key/value pairs, you can do it like this
+
+ % perf config report.queue-size call-graph.order report.children
+
+To query the config value of sort order of call graph in user config file (i.e. `~/.perfconfig`), do
+
+ % perf config --user call-graph.sort-order
+
+To query the config value of buildid directory in system config file (i.e. `$(sysconf)/perfconfig`), do
+
+ % perf config --system buildid.dir
+
Variables
~~~~~~~~~
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index ff0f433b3fce..479fc3261a50 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -61,6 +61,13 @@ OPTIONS
default, but this option shows live (currently allocated) pages
instead. (This option works with --page option only)
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+	stop time is not given (i.e., time string is 'x.y,') then analysis goes
+ to end of file.
+
SEE ALSO
--------
linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 92335193dc33..27fc3617c6a4 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -45,9 +45,9 @@ OPTIONS
param1 and param2 are defined as formats for the PMU in:
/sys/bus/event_source/devices/<pmu>/format/*
- There are also some params which are not defined in .../<pmu>/format/*.
+ There are also some parameters which are not defined in .../<pmu>/format/*.
These params can be used to overload default config values per event.
- Here is a list of the params.
+ Here are some common parameters:
- 'period': Set event sampling period
- 'freq': Set event sampling frequency
- 'time': Disable/enable time stamping. Acceptable values are 1 for
@@ -57,8 +57,11 @@ OPTIONS
FP mode, "dwarf" for DWARF mode, "lbr" for LBR mode and
"no" for disable callgraph.
- 'stack-size': user stack size for dwarf mode
+
+ See the linkperf:perf-list[1] man page for more parameters.
+
Note: If user explicitly sets options which conflict with the params,
- the value set by the params will be overridden.
+ the value set by the parameters will be overridden.
Also not defined in .../<pmu>/format/* are PMU driver specific
configuration parameters. Any configuration parameter preceded by
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 2d1746295abf..f2914f03ae7b 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -239,7 +239,8 @@ OPTIONS
Accumulate callchain of children to parent entry so that then can
show up in the output. The output will have a new "Children" column
and will be sorted on the data. It requires callchains are recorded.
- See the `overhead calculation' section for more details.
+ See the `overhead calculation' section for more details. Enabled by
+ default, disable with --no-children.
--max-stack::
Set the stack depth limit when parsing the callchain, anything
@@ -382,6 +383,13 @@ OPTIONS
--header-only::
Show only perf.data header (forces --stdio).
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+	stop time is not given (i.e., time string is 'x.y,') then analysis goes
+ to end of file.
+
--itrace::
Options for decoding instruction tracing data. The options are:
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
index 1cc08cc47ac5..7775b1eb2bee 100644
--- a/tools/perf/Documentation/perf-sched.txt
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -8,11 +8,11 @@ perf-sched - Tool to trace/measure scheduler properties (latencies)
SYNOPSIS
--------
[verse]
-'perf sched' {record|latency|map|replay|script}
+'perf sched' {record|latency|map|replay|script|timehist}
DESCRIPTION
-----------
-There are five variants of perf sched:
+There are several variants of 'perf sched':
'perf sched record <command>' to record the scheduling events
of an arbitrary workload.
@@ -36,6 +36,30 @@ There are five variants of perf sched:
are running on a CPU. A '*' denotes the CPU that had the event, and
a dot signals an idle CPU.
+ 'perf sched timehist' provides an analysis of scheduling events.
+
+ Example usage:
+ perf sched record -- sleep 1
+ perf sched timehist
+
+ By default it shows the individual schedule events, including the wait
+ time (time between sched-out and next sched-in events for the task), the
+ task scheduling delay (time between wakeup and actually running) and run
+ time for the task:
+
+ time cpu task name wait time sch delay run time
+ [tid/pid] (msec) (msec) (msec)
+ -------------- ------ -------------------- --------- --------- ---------
+ 79371.874569 [0011] gcc[31949] 0.014 0.000 1.148
+ 79371.874591 [0010] gcc[31951] 0.000 0.000 0.024
+ 79371.874603 [0010] migration/10[59] 3.350 0.004 0.011
+ 79371.874604 [0011] <idle> 1.148 0.000 0.035
+ 79371.874723 [0005] <idle> 0.016 0.000 1.383
+ 79371.874746 [0005] gcc[31949] 0.153 0.078 0.022
+ ...
+
+ Times are in msec.usec.
+
OPTIONS
-------
-i::
@@ -66,6 +90,56 @@ OPTIONS for 'perf sched map'
--color-pids::
Highlight the given pids.
+OPTIONS for 'perf sched timehist'
+---------------------------------
+-k::
+--vmlinux=<file>::
+ vmlinux pathname
+
+--kallsyms=<file>::
+ kallsyms pathname
+
+-g::
+--no-call-graph::
+ Do not display call chains if present.
+
+--max-stack::
+ Maximum number of functions to display in backtrace, default 5.
+
+-s::
+--summary::
+ Show only a summary of scheduling by thread with min, max, and average
+ run times (in sec) and relative stddev.
+
+-S::
+--with-summary::
+ Show all scheduling events followed by a summary by thread with min,
+ max, and average run times (in sec) and relative stddev.
+
+--symfs=<directory>::
+ Look for files with symbols relative to this directory.
+
+-V::
+--cpu-visual::
+ Show visual aid for sched switches by CPU: 'i' marks idle time,
+	's' marks scheduler events.
+
+-w::
+--wakeups::
+ Show wakeup events.
+
+-M::
+--migrations::
+ Show migration events.
+
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+	stop time is not given (i.e., time string is 'x.y,') then analysis goes
+ to end of file.
+
+
SEE ALSO
--------
linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 053bbbd84ece..5dc5c6a09ac4 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -117,7 +117,7 @@ OPTIONS
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, iregs, brstack, brstacksym, flags, bpf-output,
- callindent. Field list can be prepended with the type, trace, sw or hw,
+ callindent, insn, insnlen. Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@@ -181,6 +181,10 @@ OPTIONS
Instruction Trace decoding. For calls and returns, it will display the
name of the symbol indented with spaces to reflect the stack depth.
+ When doing instruction trace decoding insn and insnlen give the
+ instruction bytes and the instruction length of the current
+ instruction.
+
Finally, a user may not set fields to none for all event types.
i.e., -F "" is not allowed.
@@ -208,6 +212,9 @@ OPTIONS
--hide-call-graph::
When printing symbols do not display call chain.
+--stop-bt::
+ Stop display of callgraph at these symbols
+
-C::
--cpu:: Only report samples for the list of CPUs provided. Multiple CPUs can
be provided as a comma-separated list with no space: 0,1. Ranges of
@@ -285,6 +292,13 @@ include::itrace.txt[]
--force::
Don't do ownership validation.
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+	stop time is not given (i.e., time string is 'x.y,') then analysis goes
+ to end of file.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 91d638df3a6b..e71d63843f45 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -170,6 +170,7 @@ Default is to monitor all CPUS.
show up in the output. The output will have a new "Children" column
and will be sorted on the data. It requires -g/--call-graph option
enabled. See the `overhead calculation' section for more details.
+ Enabled by default, disable with --no-children.
--max-stack::
Set the stack depth limit when parsing the callchain, anything
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 1ab0782369b1..781b019751a4 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -39,6 +39,11 @@ OPTIONS
Prefixing with ! shows all syscalls but the ones specified. You may
need to escape it.
+-D msecs::
+--delay msecs::
+After starting the program, wait msecs before measuring. This is useful to
+filter out the startup phase of the program, which is often very different.
+
-o::
--output=::
Output file name.
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 0bda2cca2b3a..a511e5f31e36 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -51,6 +51,7 @@ tools/include/asm-generic/bitops/arch_hweight.h
tools/include/asm-generic/bitops/atomic.h
tools/include/asm-generic/bitops/const_hweight.h
tools/include/asm-generic/bitops/__ffs.h
+tools/include/asm-generic/bitops/__ffz.h
tools/include/asm-generic/bitops/__fls.h
tools/include/asm-generic/bitops/find.h
tools/include/asm-generic/bitops/fls64.h
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 72edf83d76b7..76c84f0eec52 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -136,6 +136,7 @@ endif
# Treat warnings as errors unless directed not to
ifneq ($(WERROR),0)
CFLAGS += -Werror
+ CXXFLAGS += -Werror
endif
ifndef DEBUG
@@ -182,6 +183,13 @@ CFLAGS += -Wall
CFLAGS += -Wextra
CFLAGS += -std=gnu99
+CXXFLAGS += -std=gnu++11 -fno-exceptions -fno-rtti
+CXXFLAGS += -Wall
+CXXFLAGS += -fno-omit-frame-pointer
+CXXFLAGS += -ggdb3
+CXXFLAGS += -funwind-tables
+CXXFLAGS += -Wno-strict-aliasing
+
# Enforce a non-executable stack, as we may regress (again) in the future by
# adding assembler files missing the .GNU-stack linker note.
LDFLAGS += -Wl,-z,noexecstack
@@ -204,24 +212,27 @@ ifeq ($(DEBUG),0)
endif
endif
-CFLAGS += -I$(src-perf)/util/include
-CFLAGS += -I$(src-perf)/arch/$(ARCH)/include
-CFLAGS += -I$(srctree)/tools/include/uapi
-CFLAGS += -I$(srctree)/tools/include/
-CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi
-CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/
-CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/
+INC_FLAGS += -I$(src-perf)/util/include
+INC_FLAGS += -I$(src-perf)/arch/$(ARCH)/include
+INC_FLAGS += -I$(srctree)/tools/include/uapi
+INC_FLAGS += -I$(srctree)/tools/include/
+INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi
+INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/
+INC_FLAGS += -I$(srctree)/tools/arch/$(ARCH)/
# $(obj-perf) for generated common-cmds.h
# $(obj-perf)/util for generated bison/flex headers
ifneq ($(OUTPUT),)
-CFLAGS += -I$(obj-perf)/util
-CFLAGS += -I$(obj-perf)
+INC_FLAGS += -I$(obj-perf)/util
+INC_FLAGS += -I$(obj-perf)
endif
-CFLAGS += -I$(src-perf)/util
-CFLAGS += -I$(src-perf)
-CFLAGS += -I$(srctree)/tools/lib/
+INC_FLAGS += -I$(src-perf)/util
+INC_FLAGS += -I$(src-perf)
+INC_FLAGS += -I$(srctree)/tools/lib/
+
+CFLAGS += $(INC_FLAGS)
+CXXFLAGS += $(INC_FLAGS)
CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
@@ -366,7 +377,7 @@ ifndef NO_SDT
endif
ifdef PERF_HAVE_JITDUMP
- ifndef NO_DWARF
+ ifndef NO_LIBELF
$(call detected,CONFIG_JITDUMP)
CFLAGS += -DHAVE_JITDUMP
endif
@@ -758,6 +769,62 @@ ifndef NO_AUXTRACE
endif
endif
+ifndef NO_JVMTI
+ ifneq (,$(wildcard /usr/sbin/update-java-alternatives))
+ JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
+ else
+ ifneq (,$(wildcard /usr/sbin/alternatives))
+ JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+ endif
+ endif
+ ifndef JDIR
+ $(warning No alternatives command found, you need to set JDIR= to point to the root of your Java directory)
+ NO_JVMTI := 1
+ endif
+endif
+
+ifndef NO_JVMTI
+ FEATURE_CHECK_CFLAGS-jvmti := -I$(JDIR)/include -I$(JDIR)/include/linux
+ $(call feature_check,jvmti)
+ ifeq ($(feature-jvmti), 1)
+ $(call detected_var,JDIR)
+ else
+ $(warning No openjdk development package found, please install JDK package)
+ NO_JVMTI := 1
+ endif
+endif
+
+USE_CXX = 0
+USE_CLANGLLVM = 0
+ifdef LIBCLANGLLVM
+ $(call feature_check,cxx)
+ ifneq ($(feature-cxx), 1)
+    msg := $(warning No g++ found, disabling clang and llvm support. Please install g++)
+ else
+ $(call feature_check,llvm)
+ $(call feature_check,llvm-version)
+ ifneq ($(feature-llvm), 1)
+ msg := $(warning No suitable libLLVM found, disabling builtin clang and LLVM support. Please install llvm-dev(el) (>= 3.9.0))
+ else
+ $(call feature_check,clang)
+ ifneq ($(feature-clang), 1)
+ msg := $(warning No suitable libclang found, disabling builtin clang and LLVM support. Please install libclang-dev(el) (>= 3.9.0))
+ else
+ CFLAGS += -DHAVE_LIBCLANGLLVM_SUPPORT
+ CXXFLAGS += -DHAVE_LIBCLANGLLVM_SUPPORT -I$(shell $(LLVM_CONFIG) --includedir)
+ $(call detected,CONFIG_CXX)
+ $(call detected,CONFIG_CLANGLLVM)
+ USE_CXX = 1
+ USE_LLVM = 1
+ USE_CLANG = 1
+ ifneq ($(feature-llvm-version),1)
+ msg := $(warning This version of LLVM is not tested. May cause build errors)
+ endif
+ endif
+ endif
+ endif
+endif
+
# Among the variables below, these:
# perfexecdir
# template_dir
@@ -850,6 +917,7 @@ ifeq ($(VF),1)
$(call print_var,sysconfdir)
$(call print_var,LIBUNWIND_DIR)
$(call print_var,LIBDW_DIR)
+ $(call print_var,JDIR)
ifeq ($(dwarf-post-unwind),1)
$(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 982d6439bb07..8f1c258b151a 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -86,6 +86,12 @@ include ../scripts/utilities.mak
#
# Define FEATURES_DUMP to provide features detection dump file
# and bypass the feature detection
+#
+# Define NO_JVMTI if you do not want jvmti agent built
+#
+# Define LIBCLANGLLVM if you DO want builtin clang and llvm support.
+# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
+# llvm-config is not in $PATH.
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -122,10 +128,6 @@ endif
# (this improves performance and avoids hard-to-debug behaviour);
MAKEFLAGS += -r
-$(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
- $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
- $(Q)touch $(OUTPUT)PERF-VERSION-FILE
-
# Makefiles suck: This macro sets a default value of $(2) for the
# variable named by $(1), unless the variable has been set by
# environment or command line. This is necessary for CC and AR
@@ -141,6 +143,7 @@ endef
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,AR,$(CROSS_COMPILE)ar)
$(call allow-override,LD,$(CROSS_COMPILE)ld)
+$(call allow-override,CXX,$(CROSS_COMPILE)g++)
LD += $(EXTRA_LDFLAGS)
@@ -149,6 +152,7 @@ HOSTLD ?= ld
HOSTAR ?= ar
PKG_CONFIG = $(CROSS_COMPILE)pkg-config
+LLVM_CONFIG ?= llvm-config
RM = rm -f
LN = ln -f
@@ -160,16 +164,11 @@ BISON = bison
STRIP = strip
AWK = awk
-LIB_DIR = $(srctree)/tools/lib/api/
-TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
-BPF_DIR = $(srctree)/tools/lib/bpf/
-SUBCMD_DIR = $(srctree)/tools/lib/subcmd/
-
# include Makefile.config by default and rule out
# non-config cases
config := 1
-NON_CONFIG_TARGETS := clean TAGS tags cscope help install-doc
+NON_CONFIG_TARGETS := clean TAGS tags cscope help install-doc install-man install-html install-info install-pdf doc man html info pdf
ifdef MAKECMDGOALS
ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
@@ -177,6 +176,40 @@ ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
endif
endif
+# The fixdep build - we force the fixdep tool to be built first,
+# in a separate make session, so that it is not disturbed by any
+# parallel make jobs. Once fixdep is done we issue the requested
+# build with the FIXDEP=1 variable.
+#
+# The fixdep build is disabled for $(NON_CONFIG_TARGETS)
+# targets, because it's not necessary.
+
+ifdef FIXDEP
+ force_fixdep := 0
+else
+ force_fixdep := $(config)
+endif
+
+export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK
+export HOSTCC HOSTLD HOSTAR
+
+include $(srctree)/tools/build/Makefile.include
+
+ifeq ($(force_fixdep),1)
+goals := $(filter-out all sub-make, $(MAKECMDGOALS))
+
+$(goals) all: sub-make
+
+sub-make: fixdep
+ $(Q)$(MAKE) FIXDEP=1 -f Makefile.perf $(goals)
+
+else # force_fixdep
+
+LIB_DIR = $(srctree)/tools/lib/api/
+TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
+BPF_DIR = $(srctree)/tools/lib/bpf/
+SUBCMD_DIR = $(srctree)/tools/lib/subcmd/
+
# Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
# Without this setting the output feature dump file misses some features, for
# example, liberty. Select all checkers so we won't get an incomplete feature
@@ -260,17 +293,6 @@ python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT
PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPI)
-$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
- $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
- CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
- $(PYTHON_WORD) util/setup.py \
- --quiet build_ext; \
- mkdir -p $(OUTPUT)python && \
- cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/
-#
-# No Perl scripts right now:
-#
-
SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
PROGRAMS += $(OUTPUT)perf
@@ -283,6 +305,12 @@ ifndef NO_PERF_READ_VDSOX32
PROGRAMS += $(OUTPUT)perf-read-vdsox32
endif
+LIBJVMTI = libperf-jvmti.so
+
+ifndef NO_JVMTI
+PROGRAMS += $(OUTPUT)$(LIBJVMTI)
+endif
+
# what 'all' will build and 'install' will install, in perfexecdir
ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
@@ -317,11 +345,6 @@ endif
ifndef NO_GTK2
ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so
GTK_IN := $(OUTPUT)gtk-in.o
-
-install-gtk: $(OUTPUT)libperf-gtk.so
- $(call QUIET_INSTALL, 'GTK UI') \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \
- $(INSTALL) $(OUTPUT)libperf-gtk.so '$(DESTDIR_SQ)$(libdir_SQ)'
endif
ifdef ASCIIDOC8
@@ -330,6 +353,21 @@ endif
LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
+ifeq ($(USE_CLANG), 1)
+ CLANGLIBS_LIST = AST Basic CodeGen Driver Frontend Lex Tooling Edit Sema Analysis Parse Serialization
+ LIBCLANG = $(foreach l,$(CLANGLIBS_LIST),$(wildcard $(shell $(LLVM_CONFIG) --libdir)/libclang$(l).a))
+ LIBS += -Wl,--start-group $(LIBCLANG) -Wl,--end-group
+endif
+
+ifeq ($(USE_LLVM), 1)
+ LIBLLVM = $(shell $(LLVM_CONFIG) --libs all) $(shell $(LLVM_CONFIG) --system-libs)
+ LIBS += -L$(shell $(LLVM_CONFIG) --libdir) $(LIBLLVM)
+endif
+
+ifeq ($(USE_CXX), 1)
+ LIBS += -lstdc++
+endif
+
export INSTALL SHELL_PATH
### Build rules
@@ -338,6 +376,14 @@ SHELL = $(SHELL_PATH)
all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
+$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
+ $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
+ CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
+ $(PYTHON_WORD) util/setup.py \
+ --quiet build_ext; \
+ mkdir -p $(OUTPUT)python && \
+ cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/
+
please_set_SHELL_PATH_to_a_more_modern_shell:
$(Q)$$(:)
@@ -348,10 +394,6 @@ strip: $(PROGRAMS) $(OUTPUT)perf
PERF_IN := $(OUTPUT)perf-in.o
-export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
-export HOSTCC HOSTLD HOSTAR
-include $(srctree)/tools/build/Makefile.include
-
JEVENTS := $(OUTPUT)pmu-events/jevents
JEVENTS_IN := $(OUTPUT)pmu-events/jevents-in.o
@@ -381,10 +423,10 @@ $(PERF_IN): prepare FORCE
(diff -B ../arch/x86/include/asm/cpufeatures.h ../../arch/x86/include/asm/cpufeatures.h >/dev/null) \
|| echo "Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel" >&2 )) || true
@(test -f ../../arch/x86/lib/memcpy_64.S && ( \
- (diff -B ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S >/dev/null) \
+ (diff -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S >/dev/null) \
|| echo "Warning: tools/arch/x86/lib/memcpy_64.S differs from kernel" >&2 )) || true
@(test -f ../../arch/x86/lib/memset_64.S && ( \
- (diff -B ../arch/x86/lib/memset_64.S ../../arch/x86/lib/memset_64.S >/dev/null) \
+ (diff -B -I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" ../arch/x86/lib/memset_64.S ../../arch/x86/lib/memset_64.S >/dev/null) \
|| echo "Warning: tools/arch/x86/lib/memset_64.S differs from kernel" >&2 )) || true
@(test -f ../../arch/arm/include/uapi/asm/perf_regs.h && ( \
(diff -B ../arch/arm/include/uapi/asm/perf_regs.h ../../arch/arm/include/uapi/asm/perf_regs.h >/dev/null) \
@@ -470,7 +512,7 @@ $(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_L
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
$(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
-$(GTK_IN): fixdep FORCE
+$(GTK_IN): FORCE
$(Q)$(MAKE) $(build)=gtk
$(OUTPUT)libperf-gtk.so: $(GTK_IN) $(PERFLIBS)
@@ -484,6 +526,10 @@ $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
$(SCRIPTS) : % : %.sh
$(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
+$(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
+ $(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
+ $(Q)touch $(OUTPUT)PERF-VERSION-FILE
+
# These can record PERF_VERSION
perf.spec $(SCRIPTS) \
: $(OUTPUT)PERF-VERSION-FILE
@@ -515,7 +561,7 @@ endif
__build-dir = $(subst $(OUTPUT),,$(dir $@))
build-dir = $(if $(__build-dir),$(__build-dir),.)
-prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h fixdep archheaders
+prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders
$(OUTPUT)%.o: %.c prepare FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -551,11 +597,21 @@ $(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-vdso-map.c
$(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
endif
+ifndef NO_JVMTI
+LIBJVMTI_IN := $(OUTPUT)jvmti/jvmti-in.o
+
+$(LIBJVMTI_IN): FORCE
+ $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=jvmti obj=jvmti
+
+$(OUTPUT)$(LIBJVMTI): $(LIBJVMTI_IN)
+ $(QUIET_LINK)$(CC) -shared -Wl,-soname -Wl,$(LIBJVMTI) -o $@ $< -lelf -lrt
+endif
+
$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
LIBPERF_IN := $(OUTPUT)libperf-in.o
-$(LIBPERF_IN): prepare fixdep FORCE
+$(LIBPERF_IN): prepare FORCE
$(Q)$(MAKE) $(build)=libperf
$(LIB_FILE): $(LIBPERF_IN)
@@ -563,10 +619,10 @@ $(LIB_FILE): $(LIBPERF_IN)
LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
-$(LIBTRACEEVENT): fixdep FORCE
+$(LIBTRACEEVENT): FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
-libtraceevent_plugins: fixdep FORCE
+libtraceevent_plugins: FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
$(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
@@ -579,21 +635,21 @@ $(LIBTRACEEVENT)-clean:
install-traceevent-plugins: libtraceevent_plugins
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
-$(LIBAPI): fixdep FORCE
+$(LIBAPI): FORCE
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
$(LIBAPI)-clean:
$(call QUIET_CLEAN, libapi)
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
-$(LIBBPF): fixdep FORCE
+$(LIBBPF): FORCE
$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
$(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null
-$(LIBSUBCMD): fixdep FORCE
+$(LIBSUBCMD): FORCE
$(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) $(OUTPUT)libsubcmd.a
$(LIBSUBCMD)-clean:
@@ -673,7 +729,14 @@ check: $(OUTPUT)common-cmds.h
### Installation rules
+ifndef NO_GTK2
+install-gtk: $(OUTPUT)libperf-gtk.so
+ $(call QUIET_INSTALL, 'GTK UI') \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \
+ $(INSTALL) $(OUTPUT)libperf-gtk.so '$(DESTDIR_SQ)$(libdir_SQ)'
+else
install-gtk:
+endif
install-tools: all install-gtk
$(call QUIET_INSTALL, binaries) \
@@ -688,6 +751,10 @@ ifndef NO_PERF_READ_VDSOX32
$(call QUIET_INSTALL, perf-read-vdsox32) \
$(INSTALL) $(OUTPUT)perf-read-vdsox32 '$(DESTDIR_SQ)$(bindir_SQ)';
endif
+ifndef NO_JVMTI
+ $(call QUIET_INSTALL, $(LIBJVMTI)) \
+ $(INSTALL) $(OUTPUT)$(LIBJVMTI) '$(DESTDIR_SQ)$(libdir_SQ)';
+endif
$(call QUIET_INSTALL, libexec) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(call QUIET_INSTALL, perf-archive) \
@@ -754,7 +821,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(call QUIET_CLEAN, core-objs) $(RM) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
- $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents
+ $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
$(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
$(OUTPUT)util/intel-pt-decoder/inat-tables.c $(OUTPUT)fixdep \
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
@@ -790,3 +857,4 @@ FORCE:
.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
.PHONY: libtraceevent_plugins archheaders
+endif # force_fixdep
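Note on the Makefile.perf hunks above: fixdep is now built on its own in a first make session (the sub-make: fixdep rule), and the requested goals are then re-issued with FIXDEP=1, so dependency post-processing no longer races with parallel jobs; the body below the else # force_fixdep branch, down to the endif here, is what that second pass (or a non-config target) actually executes. The PERF-VERSION-FILE and install-gtk rules are not dropped, only moved below the split, and install-gtk gains an explicit empty fallback when NO_GTK2 is set. For the new clang/LLVM hooks, an illustrative invocation consistent with the comments at the top of the file (paths are hypothetical) would be: make LIBCLANGLLVM=1 LLVM_CONFIG=/opt/llvm/bin/llvm-config; when LLVM_CONFIG is not given, the llvm-config found in $PATH is used.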
diff --git a/tools/perf/arch/arm/annotate/instructions.c b/tools/perf/arch/arm/annotate/instructions.c
new file mode 100644
index 000000000000..1ce0872b1726
--- /dev/null
+++ b/tools/perf/arch/arm/annotate/instructions.c
@@ -0,0 +1,59 @@
+#include <sys/types.h>
+#include <regex.h>
+
+struct arm_annotate {
+ regex_t call_insn,
+ jump_insn;
+};
+
+static struct ins_ops *arm__associate_instruction_ops(struct arch *arch, const char *name)
+{
+ struct arm_annotate *arm = arch->priv;
+ struct ins_ops *ops;
+ regmatch_t match[2];
+
+ if (!regexec(&arm->call_insn, name, 2, match, 0))
+ ops = &call_ops;
+ else if (!regexec(&arm->jump_insn, name, 2, match, 0))
+ ops = &jump_ops;
+ else
+ return NULL;
+
+ arch__associate_ins_ops(arch, name, ops);
+ return ops;
+}
+
+static int arm__annotate_init(struct arch *arch)
+{
+ struct arm_annotate *arm;
+ int err;
+
+ if (arch->initialized)
+ return 0;
+
+ arm = zalloc(sizeof(*arm));
+ if (!arm)
+ return -1;
+
+#define ARM_CONDS "(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl|vc|vs)"
+ err = regcomp(&arm->call_insn, "^blx?" ARM_CONDS "?$", REG_EXTENDED);
+ if (err)
+ goto out_free_arm;
+ err = regcomp(&arm->jump_insn, "^bx?" ARM_CONDS "?$", REG_EXTENDED);
+ if (err)
+ goto out_free_call;
+#undef ARM_CONDS
+
+ arch->initialized = true;
+ arch->priv = arm;
+ arch->associate_instruction_ops = arm__associate_instruction_ops;
+ arch->objdump.comment_char = ';';
+ arch->objdump.skip_functions_char = '+';
+ return 0;
+
+out_free_call:
+ regfree(&arm->call_insn);
+out_free_arm:
+ free(arm);
+ return -1;
+}
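The two regular expressions above are all the classification ARM needs: anything matching ^blx?<cond>?$ becomes a call, anything matching ^bx?<cond>?$ becomes a jump, and everything else keeps the generic handling. A minimal standalone sketch of the same classification (it assumes only POSIX regex; the perf arch/ins_ops plumbing is left out and main() is purely illustrative):

#include <regex.h>
#include <stdio.h>

#define ARM_CONDS "(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl|vc|vs)"

int main(void)
{
	regex_t call_insn, jump_insn;
	const char *names[] = { "bl", "blx", "bleq", "b", "bne", "bxls", "mov" };
	unsigned int i;

	/* Same patterns as arm__annotate_init() above. */
	if (regcomp(&call_insn, "^blx?" ARM_CONDS "?$", REG_EXTENDED) ||
	    regcomp(&jump_insn, "^bx?" ARM_CONDS "?$", REG_EXTENDED))
		return 1;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		const char *kind = "other";

		if (!regexec(&call_insn, names[i], 0, NULL, 0))
			kind = "call";		/* bl, blx, bleq, ... */
		else if (!regexec(&jump_insn, names[i], 0, NULL, 0))
			kind = "jump";		/* b, bne, bxls, ... */
		printf("%-6s -> %s\n", names[i], kind);
	}

	regfree(&call_insn);
	regfree(&jump_insn);
	return 0;
}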
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 47d584da5819..dfea6b635525 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -575,8 +575,6 @@ static FILE *cs_device__open_file(const char *name)
snprintf(path, PATH_MAX,
"%s" CS_BUS_DEVICE_PATH "%s", sysfs, name);
- printf("path: %s\n", path);
-
if (stat(path, &st) < 0)
return NULL;
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
new file mode 100644
index 000000000000..44eafd6f2d50
--- /dev/null
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -0,0 +1,62 @@
+#include <sys/types.h>
+#include <regex.h>
+
+struct arm64_annotate {
+ regex_t call_insn,
+ jump_insn;
+};
+
+static struct ins_ops *arm64__associate_instruction_ops(struct arch *arch, const char *name)
+{
+ struct arm64_annotate *arm = arch->priv;
+ struct ins_ops *ops;
+ regmatch_t match[2];
+
+ if (!regexec(&arm->jump_insn, name, 2, match, 0))
+ ops = &jump_ops;
+ else if (!regexec(&arm->call_insn, name, 2, match, 0))
+ ops = &call_ops;
+ else if (!strcmp(name, "ret"))
+ ops = &ret_ops;
+ else
+ return NULL;
+
+ arch__associate_ins_ops(arch, name, ops);
+ return ops;
+}
+
+static int arm64__annotate_init(struct arch *arch)
+{
+ struct arm64_annotate *arm;
+ int err;
+
+ if (arch->initialized)
+ return 0;
+
+ arm = zalloc(sizeof(*arm));
+ if (!arm)
+ return -1;
+
+ /* bl, blr */
+ err = regcomp(&arm->call_insn, "^blr?$", REG_EXTENDED);
+ if (err)
+ goto out_free_arm;
+ /* b, b.cond, br, cbz/cbnz, tbz/tbnz */
+ err = regcomp(&arm->jump_insn, "^[ct]?br?\\.?(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl)?n?z?$",
+ REG_EXTENDED);
+ if (err)
+ goto out_free_call;
+
+ arch->initialized = true;
+ arch->priv = arm;
+ arch->associate_instruction_ops = arm64__associate_instruction_ops;
+ arch->objdump.comment_char = ';';
+ arch->objdump.skip_functions_char = '+';
+ return 0;
+
+out_free_call:
+ regfree(&arm->call_insn);
+out_free_arm:
+ free(arm);
+ return -1;
+}
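arm64 follows the same scheme with different patterns: ^blr?$ marks bl and blr as calls, ^[ct]?br?\.?(cc|cs|...)?n?z?$ covers b, b.cond, br, cbz/cbnz and tbz/tbnz as jumps, and ret is matched literally with strcmp(). The jump pattern is noticeably permissive (it would also accept a bare "cb", for instance), which is presumably fine here since the input is an objdump mnemonic rather than arbitrary text.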
diff --git a/tools/perf/arch/powerpc/annotate/instructions.c b/tools/perf/arch/powerpc/annotate/instructions.c
new file mode 100644
index 000000000000..3c4004db81b9
--- /dev/null
+++ b/tools/perf/arch/powerpc/annotate/instructions.c
@@ -0,0 +1,58 @@
+static struct ins_ops *powerpc__associate_instruction_ops(struct arch *arch, const char *name)
+{
+ int i;
+ struct ins_ops *ops;
+
+ /*
+	 * - Only interested if the instruction starts with 'b'.
+	 * - A few mnemonics start with 'b' but aren't branch instructions.
+ */
+ if (name[0] != 'b' ||
+ !strncmp(name, "bcd", 3) ||
+ !strncmp(name, "brinc", 5) ||
+ !strncmp(name, "bper", 4))
+ return NULL;
+
+ ops = &jump_ops;
+
+ i = strlen(name) - 1;
+ if (i < 0)
+ return NULL;
+
+ /* ignore optional hints at the end of the instructions */
+ if (name[i] == '+' || name[i] == '-')
+ i--;
+
+ if (name[i] == 'l' || (name[i] == 'a' && name[i-1] == 'l')) {
+ /*
+		 * if the instruction ends with 'l' or 'la', it is
+		 * considered a 'call' since it updates LR.
+		 * ... except for 'bnl', which is branch-if-not-less-than,
+		 * and the absolute form of the same.
+ */
+ if (strcmp(name, "bnl") && strcmp(name, "bnl+") &&
+ strcmp(name, "bnl-") && strcmp(name, "bnla") &&
+ strcmp(name, "bnla+") && strcmp(name, "bnla-"))
+ ops = &call_ops;
+ }
+ if (name[i] == 'r' && name[i-1] == 'l')
+ /*
+ * instructions ending with 'lr' are considered to be
+ * return instructions
+ */
+ ops = &ret_ops;
+
+ arch__associate_ins_ops(arch, name, ops);
+ return ops;
+}
+
+static int powerpc__annotate_init(struct arch *arch)
+{
+ if (!arch->initialized) {
+ arch->initialized = true;
+ arch->associate_instruction_ops = powerpc__associate_instruction_ops;
+ arch->objdump.comment_char = '#';
+ }
+
+ return 0;
+}
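Worked examples for the PowerPC heuristic above: bl and bctrl end in 'l', so they are treated as calls (they update LR); blr ends in 'lr', so it is a return; bne+ first has its '+' hint stripped and then stays a plain jump; bnl, bnl+, bnl-, bnla and friends are explicitly excluded from the call case because their trailing 'l'/'la' spells "not less than" rather than a link bit; and the bcd*, brinc and bper* mnemonics never get this far because they are filtered out up front despite starting with 'b'.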
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
new file mode 100644
index 000000000000..c1625f256df3
--- /dev/null
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -0,0 +1,78 @@
+static struct ins x86__instructions[] = {
+ { .name = "add", .ops = &mov_ops, },
+ { .name = "addl", .ops = &mov_ops, },
+ { .name = "addq", .ops = &mov_ops, },
+ { .name = "addw", .ops = &mov_ops, },
+ { .name = "and", .ops = &mov_ops, },
+ { .name = "bts", .ops = &mov_ops, },
+ { .name = "call", .ops = &call_ops, },
+ { .name = "callq", .ops = &call_ops, },
+ { .name = "cmp", .ops = &mov_ops, },
+ { .name = "cmpb", .ops = &mov_ops, },
+ { .name = "cmpl", .ops = &mov_ops, },
+ { .name = "cmpq", .ops = &mov_ops, },
+ { .name = "cmpw", .ops = &mov_ops, },
+ { .name = "cmpxch", .ops = &mov_ops, },
+ { .name = "dec", .ops = &dec_ops, },
+ { .name = "decl", .ops = &dec_ops, },
+ { .name = "imul", .ops = &mov_ops, },
+ { .name = "inc", .ops = &dec_ops, },
+ { .name = "incl", .ops = &dec_ops, },
+ { .name = "ja", .ops = &jump_ops, },
+ { .name = "jae", .ops = &jump_ops, },
+ { .name = "jb", .ops = &jump_ops, },
+ { .name = "jbe", .ops = &jump_ops, },
+ { .name = "jc", .ops = &jump_ops, },
+ { .name = "jcxz", .ops = &jump_ops, },
+ { .name = "je", .ops = &jump_ops, },
+ { .name = "jecxz", .ops = &jump_ops, },
+ { .name = "jg", .ops = &jump_ops, },
+ { .name = "jge", .ops = &jump_ops, },
+ { .name = "jl", .ops = &jump_ops, },
+ { .name = "jle", .ops = &jump_ops, },
+ { .name = "jmp", .ops = &jump_ops, },
+ { .name = "jmpq", .ops = &jump_ops, },
+ { .name = "jna", .ops = &jump_ops, },
+ { .name = "jnae", .ops = &jump_ops, },
+ { .name = "jnb", .ops = &jump_ops, },
+ { .name = "jnbe", .ops = &jump_ops, },
+ { .name = "jnc", .ops = &jump_ops, },
+ { .name = "jne", .ops = &jump_ops, },
+ { .name = "jng", .ops = &jump_ops, },
+ { .name = "jnge", .ops = &jump_ops, },
+ { .name = "jnl", .ops = &jump_ops, },
+ { .name = "jnle", .ops = &jump_ops, },
+ { .name = "jno", .ops = &jump_ops, },
+ { .name = "jnp", .ops = &jump_ops, },
+ { .name = "jns", .ops = &jump_ops, },
+ { .name = "jnz", .ops = &jump_ops, },
+ { .name = "jo", .ops = &jump_ops, },
+ { .name = "jp", .ops = &jump_ops, },
+ { .name = "jpe", .ops = &jump_ops, },
+ { .name = "jpo", .ops = &jump_ops, },
+ { .name = "jrcxz", .ops = &jump_ops, },
+ { .name = "js", .ops = &jump_ops, },
+ { .name = "jz", .ops = &jump_ops, },
+ { .name = "lea", .ops = &mov_ops, },
+ { .name = "lock", .ops = &lock_ops, },
+ { .name = "mov", .ops = &mov_ops, },
+ { .name = "movb", .ops = &mov_ops, },
+ { .name = "movdqa", .ops = &mov_ops, },
+ { .name = "movl", .ops = &mov_ops, },
+ { .name = "movq", .ops = &mov_ops, },
+ { .name = "movslq", .ops = &mov_ops, },
+ { .name = "movzbl", .ops = &mov_ops, },
+ { .name = "movzwl", .ops = &mov_ops, },
+ { .name = "nop", .ops = &nop_ops, },
+ { .name = "nopl", .ops = &nop_ops, },
+ { .name = "nopw", .ops = &nop_ops, },
+ { .name = "or", .ops = &mov_ops, },
+ { .name = "orl", .ops = &mov_ops, },
+ { .name = "test", .ops = &mov_ops, },
+ { .name = "testb", .ops = &mov_ops, },
+ { .name = "testl", .ops = &mov_ops, },
+ { .name = "xadd", .ops = &mov_ops, },
+ { .name = "xbeginl", .ops = &jump_ops, },
+ { .name = "xbeginq", .ops = &jump_ops, },
+ { .name = "retq", .ops = &ret_ops, },
+};
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 555263e385c9..e93ef0b38db8 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -335,6 +335,9 @@
326 common copy_file_range sys_copy_file_range
327 64 preadv2 sys_preadv2
328 64 pwritev2 sys_pwritev2
+329 common pkey_mprotect sys_pkey_mprotect
+330 common pkey_alloc sys_pkey_alloc
+331 common pkey_free sys_pkey_free
#
# x32-specific system call numbers start at 512 to avoid cache impact
@@ -374,5 +377,5 @@
543 x32 io_setup compat_sys_io_setup
544 x32 io_submit compat_sys_io_submit
545 x32 execveat compat_sys_execveat/ptregs
-534 x32 preadv2 compat_sys_preadv2
-535 x32 pwritev2 compat_sys_pwritev2
+546 x32 preadv2 compat_sys_preadv64v2
+547 x32 pwritev2 compat_sys_pwritev64v2
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index 2218cb64f840..99d66191e56c 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -4,27 +4,27 @@
struct test arch_tests[] = {
{
- .desc = "x86 rdpmc test",
+ .desc = "x86 rdpmc",
.func = test__rdpmc,
},
{
- .desc = "Test converting perf time to TSC",
+ .desc = "Convert perf time to TSC",
.func = test__perf_time_to_tsc,
},
#ifdef HAVE_DWARF_UNWIND_SUPPORT
{
- .desc = "Test dwarf unwind",
+ .desc = "DWARF unwind",
.func = test__dwarf_unwind,
},
#endif
#ifdef HAVE_AUXTRACE_SUPPORT
{
- .desc = "Test x86 instruction decoder - new instructions",
+ .desc = "x86 instruction decoder - new instructions",
.func = test__insn_x86,
},
#endif
{
- .desc = "Test intel cqm nmi context read",
+ .desc = "Intel cqm nmi context read",
.func = test__intel_cqm_count_nmi_context,
},
{
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index 8024cd5febd2..bfbb6b5f609c 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -63,8 +63,9 @@ static const char * const bench_futex_hash_usage[] = {
static void *workerfn(void *arg)
{
int ret;
- unsigned int i;
struct worker *w = (struct worker *) arg;
+ unsigned int i;
+ unsigned long ops = w->ops; /* avoid cacheline bouncing */
pthread_mutex_lock(&thread_lock);
threads_starting--;
@@ -74,7 +75,7 @@ static void *workerfn(void *arg)
pthread_mutex_unlock(&thread_lock);
do {
- for (i = 0; i < nfutexes; i++, w->ops++) {
+ for (i = 0; i < nfutexes; i++, ops++) {
/*
* We want the futex calls to fail in order to stress
* the hashing of uaddr and not measure other steps,
@@ -88,6 +89,7 @@ static void *workerfn(void *arg)
}
} while (!done);
+ w->ops = ops;
return NULL;
}
@@ -128,6 +130,8 @@ int bench_futex_hash(int argc, const char **argv,
}
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ nsecs = futexbench_sanitize_numeric(nsecs);
+ nfutexes = futexbench_sanitize_numeric(nfutexes);
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
@@ -135,6 +139,8 @@ int bench_futex_hash(int argc, const char **argv,
if (!nthreads) /* default to the number of CPUs */
nthreads = ncpus;
+ else
+ nthreads = futexbench_sanitize_numeric(nthreads);
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
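The workerfn() change above (futex-lock-pi.c below gets the same treatment) keeps the per-thread operation count in a local variable for the duration of the measurement loop and stores it back to w->ops only once, after 'done' is set; since all the worker structs live in one calloc'd array, bumping w->ops on every iteration would keep bouncing that cache line between the CPUs running the workers and perturb the very thing being benchmarked.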
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 936d89d30483..465012b320ee 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -75,6 +75,7 @@ static void toggle_done(int sig __maybe_unused,
static void *workerfn(void *arg)
{
struct worker *w = (struct worker *) arg;
+ unsigned long ops = w->ops;
pthread_mutex_lock(&thread_lock);
threads_starting--;
@@ -103,9 +104,10 @@ static void *workerfn(void *arg)
if (ret && !silent)
warn("thread %d: Could not unlock pi-lock for %p (%d)",
w->tid, w->futex, ret);
- w->ops++; /* account for thread's share of work */
+ ops++; /* account for thread's share of work */
} while (!done);
+ w->ops = ops;
return NULL;
}
@@ -150,6 +152,7 @@ int bench_futex_lock_pi(int argc, const char **argv,
goto err;
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ nsecs = futexbench_sanitize_numeric(nsecs);
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
@@ -157,6 +160,8 @@ int bench_futex_lock_pi(int argc, const char **argv,
if (!nthreads)
nthreads = ncpus;
+ else
+ nthreads = futexbench_sanitize_numeric(nthreads);
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index 2b9705a8734c..fd4ee95b689a 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -128,6 +128,8 @@ int bench_futex_requeue(int argc, const char **argv,
if (!nthreads)
nthreads = ncpus;
+ else
+ nthreads = futexbench_sanitize_numeric(nthreads);
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index 2c8fa67ad537..beaa6c142477 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -217,8 +217,12 @@ int bench_futex_wake_parallel(int argc, const char **argv,
sigaction(SIGINT, &act, NULL);
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ nwaking_threads = futexbench_sanitize_numeric(nwaking_threads);
+
if (!nblocked_threads)
nblocked_threads = ncpus;
+ else
+ nblocked_threads = futexbench_sanitize_numeric(nblocked_threads);
/* some sanity checks */
if (nwaking_threads > nblocked_threads || !nwaking_threads)
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index e246b1b8388a..46efcb98b5a4 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -129,6 +129,7 @@ int bench_futex_wake(int argc, const char **argv,
}
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ nwakes = futexbench_sanitize_numeric(nwakes);
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
@@ -136,6 +137,8 @@ int bench_futex_wake(int argc, const char **argv,
if (!nthreads)
nthreads = ncpus;
+ else
+ nthreads = futexbench_sanitize_numeric(nthreads);
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index b2e06d1190d0..ba7c735c0c62 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -7,6 +7,7 @@
#ifndef _FUTEX_H
#define _FUTEX_H
+#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
@@ -99,4 +100,7 @@ static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr,
}
#endif
+/* User input sanitization */
+#define futexbench_sanitize_numeric(__n) abs((__n))
+
#endif /* _FUTEX_H */
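futexbench_sanitize_numeric() is simply abs(), so a negative thread, futex or runtime count given on the command line is folded to its magnitude (e.g. -4 becomes 4) before it is used for allocations and loop bounds.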
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
index c684910e5a48..52504a83b5a1 100644
--- a/tools/perf/bench/mem-functions.c
+++ b/tools/perf/bench/mem-functions.c
@@ -106,9 +106,10 @@ static double timeval2double(struct timeval *ts)
struct bench_mem_info {
const struct function *functions;
- u64 (*do_cycles)(const struct function *r, size_t size);
- double (*do_gettimeofday)(const struct function *r, size_t size);
+ u64 (*do_cycles)(const struct function *r, size_t size, void *src, void *dst);
+ double (*do_gettimeofday)(const struct function *r, size_t size, void *src, void *dst);
const char *const *usage;
+ bool alloc_src;
};
static void __bench_mem_function(struct bench_mem_info *info, int r_idx, size_t size, double size_total)
@@ -116,16 +117,26 @@ static void __bench_mem_function(struct bench_mem_info *info, int r_idx, size_t
const struct function *r = &info->functions[r_idx];
double result_bps = 0.0;
u64 result_cycles = 0;
+ void *src = NULL, *dst = zalloc(size);
printf("# function '%s' (%s)\n", r->name, r->desc);
+ if (dst == NULL)
+ goto out_alloc_failed;
+
+ if (info->alloc_src) {
+ src = zalloc(size);
+ if (src == NULL)
+ goto out_alloc_failed;
+ }
+
if (bench_format == BENCH_FORMAT_DEFAULT)
printf("# Copying %s bytes ...\n\n", size_str);
if (use_cycles) {
- result_cycles = info->do_cycles(r, size);
+ result_cycles = info->do_cycles(r, size, src, dst);
} else {
- result_bps = info->do_gettimeofday(r, size);
+ result_bps = info->do_gettimeofday(r, size, src, dst);
}
switch (bench_format) {
@@ -149,6 +160,14 @@ static void __bench_mem_function(struct bench_mem_info *info, int r_idx, size_t
BUG_ON(1);
break;
}
+
+out_free:
+ free(src);
+ free(dst);
+ return;
+out_alloc_failed:
+ printf("# Memory allocation failed - maybe size (%s) is too large?\n", size_str);
+ goto out_free;
}
static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *info)
@@ -201,28 +220,14 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
return 0;
}
-static void memcpy_alloc_mem(void **dst, void **src, size_t size)
-{
- *dst = zalloc(size);
- if (!*dst)
- die("memory allocation failed - maybe size is too large?\n");
-
- *src = zalloc(size);
- if (!*src)
- die("memory allocation failed - maybe size is too large?\n");
-
- /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
- memset(*src, 0, size);
-}
-
-static u64 do_memcpy_cycles(const struct function *r, size_t size)
+static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
{
u64 cycle_start = 0ULL, cycle_end = 0ULL;
- void *src = NULL, *dst = NULL;
memcpy_t fn = r->fn.memcpy;
int i;
- memcpy_alloc_mem(&dst, &src, size);
+ /* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
+ memset(src, 0, size);
/*
* We prefault the freshly allocated memory range here,
@@ -235,20 +240,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size)
fn(dst, src, size);
cycle_end = get_cycles();
- free(src);
- free(dst);
return cycle_end - cycle_start;
}
-static double do_memcpy_gettimeofday(const struct function *r, size_t size)
+static double do_memcpy_gettimeofday(const struct function *r, size_t size, void *src, void *dst)
{
struct timeval tv_start, tv_end, tv_diff;
memcpy_t fn = r->fn.memcpy;
- void *src = NULL, *dst = NULL;
int i;
- memcpy_alloc_mem(&dst, &src, size);
-
/*
* We prefault the freshly allocated memory range here,
* to not measure page fault overhead:
@@ -262,9 +262,6 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size)
timersub(&tv_end, &tv_start, &tv_diff);
- free(src);
- free(dst);
-
return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
}
@@ -294,27 +291,18 @@ int bench_mem_memcpy(int argc, const char **argv, const char *prefix __maybe_unu
.do_cycles = do_memcpy_cycles,
.do_gettimeofday = do_memcpy_gettimeofday,
.usage = bench_mem_memcpy_usage,
+ .alloc_src = true,
};
return bench_mem_common(argc, argv, &info);
}
-static void memset_alloc_mem(void **dst, size_t size)
-{
- *dst = zalloc(size);
- if (!*dst)
- die("memory allocation failed - maybe size is too large?\n");
-}
-
-static u64 do_memset_cycles(const struct function *r, size_t size)
+static u64 do_memset_cycles(const struct function *r, size_t size, void *src __maybe_unused, void *dst)
{
u64 cycle_start = 0ULL, cycle_end = 0ULL;
memset_t fn = r->fn.memset;
- void *dst = NULL;
int i;
- memset_alloc_mem(&dst, size);
-
/*
* We prefault the freshly allocated memory range here,
* to not measure page fault overhead:
@@ -326,19 +314,15 @@ static u64 do_memset_cycles(const struct function *r, size_t size)
fn(dst, i, size);
cycle_end = get_cycles();
- free(dst);
return cycle_end - cycle_start;
}
-static double do_memset_gettimeofday(const struct function *r, size_t size)
+static double do_memset_gettimeofday(const struct function *r, size_t size, void *src __maybe_unused, void *dst)
{
struct timeval tv_start, tv_end, tv_diff;
memset_t fn = r->fn.memset;
- void *dst = NULL;
int i;
- memset_alloc_mem(&dst, size);
-
/*
* We prefault the freshly allocated memory range here,
* to not measure page fault overhead:
@@ -352,7 +336,6 @@ static double do_memset_gettimeofday(const struct function *r, size_t size)
timersub(&tv_end, &tv_start, &tv_diff);
- free(dst);
return (double)(((double)size * nr_loops) / timeval2double(&tv_diff));
}
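The mem-functions.c restructuring above moves the src/dst buffer allocation out of the individual do_*_cycles()/do_*_gettimeofday() bodies and into __bench_mem_function(), which now also reports an allocation failure with a message and returns instead of calling die(). The new alloc_src flag in bench_mem_info says whether a source buffer is needed at all (set for memcpy, not needed for memset), and the prefault memset() of the source is kept inside do_memcpy_cycles() so its zero pages are still touched before timing starts.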
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
new file mode 100644
index 000000000000..4b419631753d
--- /dev/null
+++ b/tools/perf/builtin-c2c.c
@@ -0,0 +1,2780 @@
+/*
+ * This is a rewrite of the original c2c tool introduced here:
+ * http://lwn.net/Articles/588866/
+ *
+ * The original tool was adapted to fit the current perf code base.
+ *
+ * Original authors:
+ * Don Zickus <dzickus@redhat.com>
+ * Dick Fowles <fowles@inreach.com>
+ * Joe Mario <jmario@redhat.com>
+ */
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include <asm/bug.h>
+#include "util.h"
+#include "debug.h"
+#include "builtin.h"
+#include <subcmd/parse-options.h>
+#include "mem-events.h"
+#include "session.h"
+#include "hist.h"
+#include "sort.h"
+#include "tool.h"
+#include "data.h"
+#include "sort.h"
+#include "evlist.h"
+#include "evsel.h"
+#include <asm/bug.h>
+#include "ui/browsers/hists.h"
+#include "evlist.h"
+
+struct c2c_hists {
+ struct hists hists;
+ struct perf_hpp_list list;
+ struct c2c_stats stats;
+};
+
+struct compute_stats {
+ struct stats lcl_hitm;
+ struct stats rmt_hitm;
+ struct stats load;
+};
+
+struct c2c_hist_entry {
+ struct c2c_hists *hists;
+ struct c2c_stats stats;
+ unsigned long *cpuset;
+ struct c2c_stats *node_stats;
+ unsigned int cacheline_idx;
+
+ struct compute_stats cstats;
+
+ /*
+ * must be at the end,
+ * because of its callchain dynamic entry
+ */
+ struct hist_entry he;
+};
+
+static char const *coalesce_default = "pid,tid,iaddr";
+
+struct perf_c2c {
+ struct perf_tool tool;
+ struct c2c_hists hists;
+
+ unsigned long **nodes;
+ int nodes_cnt;
+ int cpus_cnt;
+ int *cpu2node;
+ int node_info;
+
+ bool show_src;
+ bool show_all;
+ bool use_stdio;
+ bool stats_only;
+ bool symbol_full;
+
+ /* HITM shared clines stats */
+ struct c2c_stats hitm_stats;
+ int shared_clines;
+
+ int display;
+
+ const char *coalesce;
+ char *cl_sort;
+ char *cl_resort;
+ char *cl_output;
+};
+
+enum {
+ DISPLAY_LCL,
+ DISPLAY_RMT,
+ DISPLAY_TOT,
+ DISPLAY_MAX,
+};
+
+static const char *display_str[DISPLAY_MAX] = {
+ [DISPLAY_LCL] = "Local",
+ [DISPLAY_RMT] = "Remote",
+ [DISPLAY_TOT] = "Total",
+};
+
+static const struct option c2c_options[] = {
+ OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"),
+ OPT_END()
+};
+
+static struct perf_c2c c2c;
+
+static void *c2c_he_zalloc(size_t size)
+{
+ struct c2c_hist_entry *c2c_he;
+
+ c2c_he = zalloc(size + sizeof(*c2c_he));
+ if (!c2c_he)
+ return NULL;
+
+ c2c_he->cpuset = bitmap_alloc(c2c.cpus_cnt);
+ if (!c2c_he->cpuset)
+ return NULL;
+
+ c2c_he->node_stats = zalloc(c2c.nodes_cnt * sizeof(*c2c_he->node_stats));
+ if (!c2c_he->node_stats)
+ return NULL;
+
+ init_stats(&c2c_he->cstats.lcl_hitm);
+ init_stats(&c2c_he->cstats.rmt_hitm);
+ init_stats(&c2c_he->cstats.load);
+
+ return &c2c_he->he;
+}
+
+static void c2c_he_free(void *he)
+{
+ struct c2c_hist_entry *c2c_he;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ if (c2c_he->hists) {
+ hists__delete_entries(&c2c_he->hists->hists);
+ free(c2c_he->hists);
+ }
+
+ free(c2c_he->cpuset);
+ free(c2c_he->node_stats);
+ free(c2c_he);
+}
+
+static struct hist_entry_ops c2c_entry_ops = {
+ .new = c2c_he_zalloc,
+ .free = c2c_he_free,
+};
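c2c_he_zalloc() above over-allocates on purpose: the caller asks for 'size' bytes (a hist_entry plus its dynamic callchain tail), the wrapper adds room for its own counters in front, and only the address of the embedded 'he' member is handed back; c2c_he_free() recovers the wrapper with container_of(). The struct comment explains why 'he' has to stay the last member. A small standalone sketch of the same embedding pattern, using made-up names (wrapper, private_stats) and a trimmed-down container_of:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the perf type; the real hist_entry ends in a dynamic area. */
struct hist_entry { long payload; };

struct wrapper {
	int private_stats;	/* per-entry data, like c2c_hist_entry's stats */
	struct hist_entry he;	/* must be last, as in c2c_hist_entry */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Mirror c2c_he_zalloc(): allocate the wrapper plus the requested entry size. */
static struct hist_entry *wrapper_zalloc(size_t size)
{
	struct wrapper *w = calloc(1, size + sizeof(*w));

	return w ? &w->he : NULL;
}

int main(void)
{
	/* 'size' would include the callchain tail in perf; 64 is arbitrary here. */
	struct hist_entry *he = wrapper_zalloc(sizeof(*he) + 64);
	struct wrapper *w;

	if (!he)
		return 1;

	w = container_of(he, struct wrapper, he);
	w->private_stats = 42;
	printf("private data reached via the embedded entry: %d\n", w->private_stats);
	free(w);
	return 0;
}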
+
+static int c2c_hists__init(struct c2c_hists *hists,
+ const char *sort,
+ int nr_header_lines);
+
+static struct c2c_hists*
+he__get_c2c_hists(struct hist_entry *he,
+ const char *sort,
+ int nr_header_lines)
+{
+ struct c2c_hist_entry *c2c_he;
+ struct c2c_hists *hists;
+ int ret;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ if (c2c_he->hists)
+ return c2c_he->hists;
+
+ hists = c2c_he->hists = zalloc(sizeof(*hists));
+ if (!hists)
+ return NULL;
+
+ ret = c2c_hists__init(hists, sort, nr_header_lines);
+ if (ret) {
+ free(hists);
+ return NULL;
+ }
+
+ return hists;
+}
+
+static void c2c_he__set_cpu(struct c2c_hist_entry *c2c_he,
+ struct perf_sample *sample)
+{
+ if (WARN_ONCE(sample->cpu == (unsigned int) -1,
+ "WARNING: no sample cpu value"))
+ return;
+
+ set_bit(sample->cpu, c2c_he->cpuset);
+}
+
+static void compute_stats(struct c2c_hist_entry *c2c_he,
+ struct c2c_stats *stats,
+ u64 weight)
+{
+ struct compute_stats *cstats = &c2c_he->cstats;
+
+ if (stats->rmt_hitm)
+ update_stats(&cstats->rmt_hitm, weight);
+ else if (stats->lcl_hitm)
+ update_stats(&cstats->lcl_hitm, weight);
+ else if (stats->load)
+ update_stats(&cstats->load, weight);
+}
+
+static int process_sample_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel __maybe_unused,
+ struct machine *machine)
+{
+ struct c2c_hists *c2c_hists = &c2c.hists;
+ struct c2c_hist_entry *c2c_he;
+ struct c2c_stats stats = { .nr_entries = 0, };
+ struct hist_entry *he;
+ struct addr_location al;
+ struct mem_info *mi, *mi_dup;
+ int ret;
+
+ if (machine__resolve(machine, &al, sample) < 0) {
+ pr_debug("problem processing %d event, skipping it.\n",
+ event->header.type);
+ return -1;
+ }
+
+ ret = sample__resolve_callchain(sample, &callchain_cursor, NULL,
+ evsel, &al, sysctl_perf_event_max_stack);
+ if (ret)
+ goto out;
+
+ mi = sample__resolve_mem(sample, &al);
+ if (mi == NULL)
+ return -ENOMEM;
+
+ mi_dup = memdup(mi, sizeof(*mi));
+ if (!mi_dup)
+ goto free_mi;
+
+ c2c_decode_stats(&stats, mi);
+
+ he = hists__add_entry_ops(&c2c_hists->hists, &c2c_entry_ops,
+ &al, NULL, NULL, mi,
+ sample, true);
+ if (he == NULL)
+ goto free_mi_dup;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ c2c_add_stats(&c2c_he->stats, &stats);
+ c2c_add_stats(&c2c_hists->stats, &stats);
+
+ c2c_he__set_cpu(c2c_he, sample);
+
+ hists__inc_nr_samples(&c2c_hists->hists, he->filtered);
+ ret = hist_entry__append_callchain(he, sample);
+
+ if (!ret) {
+ /*
+		 * There has already been a warning about the missing
+		 * sample cpu value. In that case account everything to
+		 * node 0, without any further warning.
+		 *
+		 * Node stats are only done for single callchain data.
+ */
+ int cpu = sample->cpu == (unsigned int) -1 ? 0 : sample->cpu;
+ int node = c2c.cpu2node[cpu];
+
+ mi = mi_dup;
+
+ mi_dup = memdup(mi, sizeof(*mi));
+ if (!mi_dup)
+ goto free_mi;
+
+ c2c_hists = he__get_c2c_hists(he, c2c.cl_sort, 2);
+ if (!c2c_hists)
+ goto free_mi_dup;
+
+ he = hists__add_entry_ops(&c2c_hists->hists, &c2c_entry_ops,
+ &al, NULL, NULL, mi,
+ sample, true);
+ if (he == NULL)
+ goto free_mi_dup;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ c2c_add_stats(&c2c_he->stats, &stats);
+ c2c_add_stats(&c2c_hists->stats, &stats);
+ c2c_add_stats(&c2c_he->node_stats[node], &stats);
+
+ compute_stats(c2c_he, &stats, sample->weight);
+
+ c2c_he__set_cpu(c2c_he, sample);
+
+ hists__inc_nr_samples(&c2c_hists->hists, he->filtered);
+ ret = hist_entry__append_callchain(he, sample);
+ }
+
+out:
+ addr_location__put(&al);
+ return ret;
+
+free_mi_dup:
+ free(mi_dup);
+free_mi:
+ free(mi);
+ ret = -ENOMEM;
+ goto out;
+}
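process_sample_event() above fills two levels of histograms: every sample first lands in the global c2c.hists, whose entries are the contended cache lines themselves, and is then added again to a nested c2c_hists hanging off that cache line entry, created on demand by he__get_c2c_hists() with the c2c.cl_sort keys (the coalesce default above suggests pid,tid,iaddr), where the per-node counters, cpu set and weight statistics are accumulated. Each hists__add_entry_ops() call gets its own copy of the resolved mem_info, which is why the mi/mi_dup memdup() shuffle is needed.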
+
+static struct perf_c2c c2c = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_exit,
+ .fork = perf_event__process_fork,
+ .lost = perf_event__process_lost,
+ .ordered_events = true,
+ .ordering_requires_timestamps = true,
+ },
+};
+
+static const char * const c2c_usage[] = {
+ "perf c2c {record|report}",
+ NULL
+};
+
+static const char * const __usage_report[] = {
+ "perf c2c report",
+ NULL
+};
+
+static const char * const *report_c2c_usage = __usage_report;
+
+#define C2C_HEADER_MAX 2
+
+struct c2c_header {
+ struct {
+ const char *text;
+ int span;
+ } line[C2C_HEADER_MAX];
+};
+
+struct c2c_dimension {
+ struct c2c_header header;
+ const char *name;
+ int width;
+ struct sort_entry *se;
+
+ int64_t (*cmp)(struct perf_hpp_fmt *fmt,
+ struct hist_entry *, struct hist_entry *);
+ int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
+ int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
+};
+
+struct c2c_fmt {
+ struct perf_hpp_fmt fmt;
+ struct c2c_dimension *dim;
+};
+
+#define SYMBOL_WIDTH 30
+
+static struct c2c_dimension dim_symbol;
+static struct c2c_dimension dim_srcline;
+
+static int symbol_width(struct hists *hists, struct sort_entry *se)
+{
+ int width = hists__col_len(hists, se->se_width_idx);
+
+ if (!c2c.symbol_full)
+ width = MIN(width, SYMBOL_WIDTH);
+
+ return width;
+}
+
+static int c2c_width(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp __maybe_unused,
+ struct hists *hists __maybe_unused)
+{
+ struct c2c_fmt *c2c_fmt;
+ struct c2c_dimension *dim;
+
+ c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
+ dim = c2c_fmt->dim;
+
+ if (dim == &dim_symbol || dim == &dim_srcline)
+ return symbol_width(hists, dim->se);
+
+ return dim->se ? hists__col_len(hists, dim->se->se_width_idx) :
+ c2c_fmt->dim->width;
+}
+
+static int c2c_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hists *hists, int line, int *span)
+{
+ struct perf_hpp_list *hpp_list = hists->hpp_list;
+ struct c2c_fmt *c2c_fmt;
+ struct c2c_dimension *dim;
+ const char *text = NULL;
+ int width = c2c_width(fmt, hpp, hists);
+
+ c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
+ dim = c2c_fmt->dim;
+
+ if (dim->se) {
+ text = dim->header.line[line].text;
+ /* Use the last line from sort_entry if not defined. */
+ if (!text && (line == hpp_list->nr_header_lines - 1))
+ text = dim->se->se_header;
+ } else {
+ text = dim->header.line[line].text;
+
+ if (*span) {
+ (*span)--;
+ return 0;
+ } else {
+ *span = dim->header.line[line].span;
+ }
+ }
+
+ if (text == NULL)
+ text = "";
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, text);
+}
+
+#define HEX_STR(__s, __v) \
+({ \
+ scnprintf(__s, sizeof(__s), "0x%" PRIx64, __v); \
+ __s; \
+})
+
+static int64_t
+dcacheline_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ return sort__dcacheline_cmp(left, right);
+}
+
+static int dcacheline_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ uint64_t addr = 0;
+ int width = c2c_width(fmt, hpp, he->hists);
+ char buf[20];
+
+ if (he->mem_info)
+ addr = cl_address(he->mem_info->daddr.addr);
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
+}
+
+static int offset_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ uint64_t addr = 0;
+ int width = c2c_width(fmt, hpp, he->hists);
+ char buf[20];
+
+ if (he->mem_info)
+ addr = cl_offset(he->mem_info->daddr.al_addr);
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
+}
+
+static int64_t
+offset_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ uint64_t l = 0, r = 0;
+
+ if (left->mem_info)
+ l = cl_offset(left->mem_info->daddr.addr);
+ if (right->mem_info)
+ r = cl_offset(right->mem_info->daddr.addr);
+
+ return (int64_t)(r - l);
+}
+
+static int
+iaddr_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ uint64_t addr = 0;
+ int width = c2c_width(fmt, hpp, he->hists);
+ char buf[20];
+
+ if (he->mem_info)
+ addr = he->mem_info->iaddr.addr;
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
+}
+
+static int64_t
+iaddr_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ return sort__iaddr_cmp(left, right);
+}
+
+static int
+tot_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ int width = c2c_width(fmt, hpp, he->hists);
+ unsigned int tot_hitm;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ tot_hitm = c2c_he->stats.lcl_hitm + c2c_he->stats.rmt_hitm;
+
+ return scnprintf(hpp->buf, hpp->size, "%*u", width, tot_hitm);
+}
+
+static int64_t
+tot_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ struct c2c_hist_entry *c2c_left;
+ struct c2c_hist_entry *c2c_right;
+ unsigned int tot_hitm_left;
+ unsigned int tot_hitm_right;
+
+ c2c_left = container_of(left, struct c2c_hist_entry, he);
+ c2c_right = container_of(right, struct c2c_hist_entry, he);
+
+ tot_hitm_left = c2c_left->stats.lcl_hitm + c2c_left->stats.rmt_hitm;
+ tot_hitm_right = c2c_right->stats.lcl_hitm + c2c_right->stats.rmt_hitm;
+
+ return tot_hitm_left - tot_hitm_right;
+}
+
+#define STAT_FN_ENTRY(__f) \
+static int \
+__f ## _entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, \
+ struct hist_entry *he) \
+{ \
+ struct c2c_hist_entry *c2c_he; \
+ int width = c2c_width(fmt, hpp, he->hists); \
+ \
+ c2c_he = container_of(he, struct c2c_hist_entry, he); \
+ return scnprintf(hpp->buf, hpp->size, "%*u", width, \
+ c2c_he->stats.__f); \
+}
+
+#define STAT_FN_CMP(__f) \
+static int64_t \
+__f ## _cmp(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct hist_entry *left, struct hist_entry *right) \
+{ \
+ struct c2c_hist_entry *c2c_left, *c2c_right; \
+ \
+ c2c_left = container_of(left, struct c2c_hist_entry, he); \
+ c2c_right = container_of(right, struct c2c_hist_entry, he); \
+ return c2c_left->stats.__f - c2c_right->stats.__f; \
+}
+
+#define STAT_FN(__f) \
+ STAT_FN_ENTRY(__f) \
+ STAT_FN_CMP(__f)
+
+STAT_FN(rmt_hitm)
+STAT_FN(lcl_hitm)
+STAT_FN(store)
+STAT_FN(st_l1hit)
+STAT_FN(st_l1miss)
+STAT_FN(ld_fbhit)
+STAT_FN(ld_l1hit)
+STAT_FN(ld_l2hit)
+STAT_FN(ld_llchit)
+STAT_FN(rmt_hit)
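STAT_FN() above stamps out a matched pair of column callbacks for each counter in struct c2c_stats; for instance, STAT_FN(rmt_hitm) expands (reformatted here, and relying on the perf definitions already in this file rather than being standalone) to roughly:

static int
rmt_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	       struct hist_entry *he)
{
	struct c2c_hist_entry *c2c_he;
	int width = c2c_width(fmt, hpp, he->hists);

	c2c_he = container_of(he, struct c2c_hist_entry, he);
	return scnprintf(hpp->buf, hpp->size, "%*u", width,
			 c2c_he->stats.rmt_hitm);
}

static int64_t
rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
	     struct hist_entry *left, struct hist_entry *right)
{
	struct c2c_hist_entry *c2c_left, *c2c_right;

	c2c_left = container_of(left, struct c2c_hist_entry, he);
	c2c_right = container_of(right, struct c2c_hist_entry, he);
	return c2c_left->stats.rmt_hitm - c2c_right->stats.rmt_hitm;
}

These generated *_entry/*_cmp pairs are what the dim_* tables further down wire into their .entry and .cmp slots.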
+
+static uint64_t llc_miss(struct c2c_stats *stats)
+{
+ uint64_t llcmiss;
+
+ llcmiss = stats->lcl_dram +
+ stats->rmt_dram +
+ stats->rmt_hitm +
+ stats->rmt_hit;
+
+ return llcmiss;
+}
+
+static int
+ld_llcmiss_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ int width = c2c_width(fmt, hpp, he->hists);
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+
+ return scnprintf(hpp->buf, hpp->size, "%*lu", width,
+ llc_miss(&c2c_he->stats));
+}
+
+static int64_t
+ld_llcmiss_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ struct c2c_hist_entry *c2c_left;
+ struct c2c_hist_entry *c2c_right;
+
+ c2c_left = container_of(left, struct c2c_hist_entry, he);
+ c2c_right = container_of(right, struct c2c_hist_entry, he);
+
+ return llc_miss(&c2c_left->stats) - llc_miss(&c2c_right->stats);
+}
+
+static uint64_t total_records(struct c2c_stats *stats)
+{
+ uint64_t lclmiss, ldcnt, total;
+
+ lclmiss = stats->lcl_dram +
+ stats->rmt_dram +
+ stats->rmt_hitm +
+ stats->rmt_hit;
+
+ ldcnt = lclmiss +
+ stats->ld_fbhit +
+ stats->ld_l1hit +
+ stats->ld_l2hit +
+ stats->ld_llchit +
+ stats->lcl_hitm;
+
+ total = ldcnt +
+ stats->st_l1hit +
+ stats->st_l1miss;
+
+ return total;
+}
+
+static int
+tot_recs_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ int width = c2c_width(fmt, hpp, he->hists);
+ uint64_t tot_recs;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ tot_recs = total_records(&c2c_he->stats);
+
+ return scnprintf(hpp->buf, hpp->size, "%*" PRIu64, width, tot_recs);
+}
+
+static int64_t
+tot_recs_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ struct c2c_hist_entry *c2c_left;
+ struct c2c_hist_entry *c2c_right;
+ uint64_t tot_recs_left;
+ uint64_t tot_recs_right;
+
+ c2c_left = container_of(left, struct c2c_hist_entry, he);
+ c2c_right = container_of(right, struct c2c_hist_entry, he);
+
+ tot_recs_left = total_records(&c2c_left->stats);
+ tot_recs_right = total_records(&c2c_right->stats);
+
+ return tot_recs_left - tot_recs_right;
+}
+
+static uint64_t total_loads(struct c2c_stats *stats)
+{
+ uint64_t lclmiss, ldcnt;
+
+ lclmiss = stats->lcl_dram +
+ stats->rmt_dram +
+ stats->rmt_hitm +
+ stats->rmt_hit;
+
+ ldcnt = lclmiss +
+ stats->ld_fbhit +
+ stats->ld_l1hit +
+ stats->ld_l2hit +
+ stats->ld_llchit +
+ stats->lcl_hitm;
+
+ return ldcnt;
+}
+
+static int
+tot_loads_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ int width = c2c_width(fmt, hpp, he->hists);
+ uint64_t tot_recs;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ tot_recs = total_loads(&c2c_he->stats);
+
+ return scnprintf(hpp->buf, hpp->size, "%*" PRIu64, width, tot_recs);
+}
+
+static int64_t
+tot_loads_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ struct c2c_hist_entry *c2c_left;
+ struct c2c_hist_entry *c2c_right;
+ uint64_t tot_recs_left;
+ uint64_t tot_recs_right;
+
+ c2c_left = container_of(left, struct c2c_hist_entry, he);
+ c2c_right = container_of(right, struct c2c_hist_entry, he);
+
+ tot_recs_left = total_loads(&c2c_left->stats);
+ tot_recs_right = total_loads(&c2c_right->stats);
+
+ return tot_recs_left - tot_recs_right;
+}
+
+typedef double (get_percent_cb)(struct c2c_hist_entry *);
+
+static int
+percent_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he, get_percent_cb get_percent)
+{
+ struct c2c_hist_entry *c2c_he;
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ per = get_percent(c2c_he);
+
+#ifdef HAVE_SLANG_SUPPORT
+ if (use_browser)
+ return __hpp__slsmg_color_printf(hpp, "%*.2f%%", width - 1, per);
+#endif
+ return hpp_color_scnprintf(hpp, "%*.2f%%", width - 1, per);
+}
+
+static double percent_hitm(struct c2c_hist_entry *c2c_he)
+{
+ struct c2c_hists *hists;
+ struct c2c_stats *stats;
+ struct c2c_stats *total;
+ int tot = 0, st = 0;
+ double p;
+
+ hists = container_of(c2c_he->he.hists, struct c2c_hists, hists);
+ stats = &c2c_he->stats;
+ total = &hists->stats;
+
+ switch (c2c.display) {
+ case DISPLAY_RMT:
+ st = stats->rmt_hitm;
+ tot = total->rmt_hitm;
+ break;
+ case DISPLAY_LCL:
+ st = stats->lcl_hitm;
+ tot = total->lcl_hitm;
+ break;
+ case DISPLAY_TOT:
+ st = stats->tot_hitm;
+ tot = total->tot_hitm;
+ default:
+ break;
+ }
+
+ p = tot ? (double) st / tot : 0;
+
+ return 100 * p;
+}
+
+#define PERC_STR(__s, __v) \
+({ \
+ scnprintf(__s, sizeof(__s), "%.2F%%", __v); \
+ __s; \
+})
+
+static int
+percent_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ int width = c2c_width(fmt, hpp, he->hists);
+ char buf[10];
+ double per;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ per = percent_hitm(c2c_he);
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_hitm_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_hitm);
+}
+
+static int64_t
+percent_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ struct c2c_hist_entry *c2c_left;
+ struct c2c_hist_entry *c2c_right;
+ double per_left;
+ double per_right;
+
+ c2c_left = container_of(left, struct c2c_hist_entry, he);
+ c2c_right = container_of(right, struct c2c_hist_entry, he);
+
+ per_left = percent_hitm(c2c_left);
+ per_right = percent_hitm(c2c_right);
+
+ return per_left - per_right;
+}
+
+static struct c2c_stats *he_stats(struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ return &c2c_he->stats;
+}
+
+static struct c2c_stats *total_stats(struct hist_entry *he)
+{
+ struct c2c_hists *hists;
+
+ hists = container_of(he->hists, struct c2c_hists, hists);
+ return &hists->stats;
+}
+
+static double percent(int st, int tot)
+{
+ return tot ? 100. * (double) st / (double) tot : 0;
+}
+
+#define PERCENT(__h, __f) percent(he_stats(__h)->__f, total_stats(__h)->__f)
+
+#define PERCENT_FN(__f) \
+static double percent_ ## __f(struct c2c_hist_entry *c2c_he) \
+{ \
+ struct c2c_hists *hists; \
+ \
+ hists = container_of(c2c_he->he.hists, struct c2c_hists, hists); \
+ return percent(c2c_he->stats.__f, hists->stats.__f); \
+}
+
+PERCENT_FN(rmt_hitm)
+PERCENT_FN(lcl_hitm)
+PERCENT_FN(st_l1hit)
+PERCENT_FN(st_l1miss)
+
+static int
+percent_rmt_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, rmt_hitm);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_rmt_hitm_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_rmt_hitm);
+}
+
+static int64_t
+percent_rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+	per_left  = PERCENT(left, rmt_hitm);
+	per_right = PERCENT(right, rmt_hitm);
+
+ return per_left - per_right;
+}
+
+static int
+percent_lcl_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, lcl_hitm);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_lcl_hitm_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_lcl_hitm);
+}
+
+static int64_t
+percent_lcl_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = PERCENT(left, lcl_hitm);
+ per_right = PERCENT(right, lcl_hitm);
+
+ return per_left - per_right;
+}
+
+static int
+percent_stores_l1hit_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, st_l1hit);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_stores_l1hit_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_st_l1hit);
+}
+
+static int64_t
+percent_stores_l1hit_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = PERCENT(left, st_l1hit);
+ per_right = PERCENT(right, st_l1hit);
+
+ return per_left - per_right;
+}
+
+static int
+percent_stores_l1miss_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, st_l1miss);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_stores_l1miss_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_st_l1miss);
+}
+
+static int64_t
+percent_stores_l1miss_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = PERCENT(left, st_l1miss);
+ per_right = PERCENT(right, st_l1miss);
+
+ return per_left - per_right;
+}
+
+STAT_FN(lcl_dram)
+STAT_FN(rmt_dram)
+
+static int
+pid_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+
+ return scnprintf(hpp->buf, hpp->size, "%*d", width, he->thread->pid_);
+}
+
+static int64_t
+pid_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ return left->thread->pid_ - right->thread->pid_;
+}
+
+static int64_t
+empty_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left __maybe_unused,
+ struct hist_entry *right __maybe_unused)
+{
+ return 0;
+}
+
+static int
+node_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ bool first = true;
+ int node;
+ int ret = 0;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+
+ for (node = 0; node < c2c.nodes_cnt; node++) {
+ DECLARE_BITMAP(set, c2c.cpus_cnt);
+
+ bitmap_zero(set, c2c.cpus_cnt);
+ bitmap_and(set, c2c_he->cpuset, c2c.nodes[node], c2c.cpus_cnt);
+
+ if (!bitmap_weight(set, c2c.cpus_cnt)) {
+ if (c2c.node_info == 1) {
+ ret = scnprintf(hpp->buf, hpp->size, "%21s", " ");
+ advance_hpp(hpp, ret);
+ }
+ continue;
+ }
+
+ if (!first) {
+ ret = scnprintf(hpp->buf, hpp->size, " ");
+ advance_hpp(hpp, ret);
+ }
+
+ switch (c2c.node_info) {
+ case 0:
+ ret = scnprintf(hpp->buf, hpp->size, "%2d", node);
+ advance_hpp(hpp, ret);
+ break;
+ case 1:
+ {
+ int num = bitmap_weight(c2c_he->cpuset, c2c.cpus_cnt);
+ struct c2c_stats *stats = &c2c_he->node_stats[node];
+
+ ret = scnprintf(hpp->buf, hpp->size, "%2d{%2d ", node, num);
+ advance_hpp(hpp, ret);
+
+ #define DISPLAY_HITM(__h) \
+	if (c2c_he->stats.__h > 0) { \
+ ret = scnprintf(hpp->buf, hpp->size, "%5.1f%% ", \
+ percent(stats->__h, c2c_he->stats.__h));\
+ } else { \
+ ret = scnprintf(hpp->buf, hpp->size, "%6s ", "n/a"); \
+ }
+
+ switch (c2c.display) {
+ case DISPLAY_RMT:
+ DISPLAY_HITM(rmt_hitm);
+ break;
+ case DISPLAY_LCL:
+ DISPLAY_HITM(lcl_hitm);
+ break;
+ case DISPLAY_TOT:
+ DISPLAY_HITM(tot_hitm);
+ default:
+ break;
+ }
+
+ #undef DISPLAY_HITM
+
+ advance_hpp(hpp, ret);
+
+ if (c2c_he->stats.store > 0) {
+ ret = scnprintf(hpp->buf, hpp->size, "%5.1f%%}",
+ percent(stats->store, c2c_he->stats.store));
+ } else {
+ ret = scnprintf(hpp->buf, hpp->size, "%6s}", "n/a");
+ }
+
+ advance_hpp(hpp, ret);
+ break;
+ }
+ case 2:
+ ret = scnprintf(hpp->buf, hpp->size, "%2d{", node);
+ advance_hpp(hpp, ret);
+
+ ret = bitmap_scnprintf(set, c2c.cpus_cnt, hpp->buf, hpp->size);
+ advance_hpp(hpp, ret);
+
+ ret = scnprintf(hpp->buf, hpp->size, "}");
+ advance_hpp(hpp, ret);
+ break;
+ default:
+ break;
+ }
+
+ first = false;
+ }
+
+ return 0;
+}
+
+static int
+mean_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he, double mean)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ char buf[10];
+
+ scnprintf(buf, 10, "%6.0f", mean);
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, buf);
+}
+
+#define MEAN_ENTRY(__func, __val) \
+static int \
+__func(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) \
+{ \
+ struct c2c_hist_entry *c2c_he; \
+ c2c_he = container_of(he, struct c2c_hist_entry, he); \
+ return mean_entry(fmt, hpp, he, avg_stats(&c2c_he->cstats.__val)); \
+}
+
+MEAN_ENTRY(mean_rmt_entry, rmt_hitm);
+MEAN_ENTRY(mean_lcl_entry, lcl_hitm);
+MEAN_ENTRY(mean_load_entry, load);
+
+static int
+cpucnt_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ int width = c2c_width(fmt, hpp, he->hists);
+ char buf[10];
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+
+ scnprintf(buf, 10, "%d", bitmap_weight(c2c_he->cpuset, c2c.cpus_cnt));
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, buf);
+}
+
+static int
+cl_idx_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ int width = c2c_width(fmt, hpp, he->hists);
+ char buf[10];
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+
+ scnprintf(buf, 10, "%u", c2c_he->cacheline_idx);
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, buf);
+}
+
+static int
+cl_idx_empty_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, "");
+}
+
+#define HEADER_LOW(__h) \
+ { \
+ .line[1] = { \
+ .text = __h, \
+ }, \
+ }
+
+#define HEADER_BOTH(__h0, __h1) \
+ { \
+ .line[0] = { \
+ .text = __h0, \
+ }, \
+ .line[1] = { \
+ .text = __h1, \
+ }, \
+ }
+
+#define HEADER_SPAN(__h0, __h1, __s) \
+ { \
+ .line[0] = { \
+ .text = __h0, \
+ .span = __s, \
+ }, \
+ .line[1] = { \
+ .text = __h1, \
+ }, \
+ }
+
+#define HEADER_SPAN_LOW(__h) \
+ { \
+ .line[1] = { \
+ .text = __h, \
+ }, \
+ }
+
+static struct c2c_dimension dim_dcacheline = {
+ .header = HEADER_LOW("Cacheline"),
+ .name = "dcacheline",
+ .cmp = dcacheline_cmp,
+ .entry = dcacheline_entry,
+ .width = 18,
+};
+
+static struct c2c_header header_offset_tui = HEADER_LOW("Off");
+
+static struct c2c_dimension dim_offset = {
+ .header = HEADER_BOTH("Data address", "Offset"),
+ .name = "offset",
+ .cmp = offset_cmp,
+ .entry = offset_entry,
+ .width = 18,
+};
+
+static struct c2c_dimension dim_iaddr = {
+ .header = HEADER_LOW("Code address"),
+ .name = "iaddr",
+ .cmp = iaddr_cmp,
+ .entry = iaddr_entry,
+ .width = 18,
+};
+
+static struct c2c_dimension dim_tot_hitm = {
+ .header = HEADER_SPAN("----- LLC Load Hitm -----", "Total", 2),
+ .name = "tot_hitm",
+ .cmp = tot_hitm_cmp,
+ .entry = tot_hitm_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_lcl_hitm = {
+ .header = HEADER_SPAN_LOW("Lcl"),
+ .name = "lcl_hitm",
+ .cmp = lcl_hitm_cmp,
+ .entry = lcl_hitm_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_rmt_hitm = {
+ .header = HEADER_SPAN_LOW("Rmt"),
+ .name = "rmt_hitm",
+ .cmp = rmt_hitm_cmp,
+ .entry = rmt_hitm_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_cl_rmt_hitm = {
+ .header = HEADER_SPAN("----- HITM -----", "Rmt", 1),
+ .name = "cl_rmt_hitm",
+ .cmp = rmt_hitm_cmp,
+ .entry = rmt_hitm_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_cl_lcl_hitm = {
+ .header = HEADER_SPAN_LOW("Lcl"),
+ .name = "cl_lcl_hitm",
+ .cmp = lcl_hitm_cmp,
+ .entry = lcl_hitm_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_stores = {
+ .header = HEADER_SPAN("---- Store Reference ----", "Total", 2),
+ .name = "stores",
+ .cmp = store_cmp,
+ .entry = store_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_stores_l1hit = {
+ .header = HEADER_SPAN_LOW("L1Hit"),
+ .name = "stores_l1hit",
+ .cmp = st_l1hit_cmp,
+ .entry = st_l1hit_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_stores_l1miss = {
+ .header = HEADER_SPAN_LOW("L1Miss"),
+ .name = "stores_l1miss",
+ .cmp = st_l1miss_cmp,
+ .entry = st_l1miss_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_cl_stores_l1hit = {
+ .header = HEADER_SPAN("-- Store Refs --", "L1 Hit", 1),
+ .name = "cl_stores_l1hit",
+ .cmp = st_l1hit_cmp,
+ .entry = st_l1hit_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_cl_stores_l1miss = {
+ .header = HEADER_SPAN_LOW("L1 Miss"),
+ .name = "cl_stores_l1miss",
+ .cmp = st_l1miss_cmp,
+ .entry = st_l1miss_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_ld_fbhit = {
+ .header = HEADER_SPAN("----- Core Load Hit -----", "FB", 2),
+ .name = "ld_fbhit",
+ .cmp = ld_fbhit_cmp,
+ .entry = ld_fbhit_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_ld_l1hit = {
+ .header = HEADER_SPAN_LOW("L1"),
+ .name = "ld_l1hit",
+ .cmp = ld_l1hit_cmp,
+ .entry = ld_l1hit_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_ld_l2hit = {
+ .header = HEADER_SPAN_LOW("L2"),
+ .name = "ld_l2hit",
+ .cmp = ld_l2hit_cmp,
+ .entry = ld_l2hit_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_ld_llchit = {
+ .header = HEADER_SPAN("-- LLC Load Hit --", "Llc", 1),
+ .name = "ld_lclhit",
+ .cmp = ld_llchit_cmp,
+ .entry = ld_llchit_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_ld_rmthit = {
+ .header = HEADER_SPAN_LOW("Rmt"),
+ .name = "ld_rmthit",
+ .cmp = rmt_hit_cmp,
+ .entry = rmt_hit_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_ld_llcmiss = {
+ .header = HEADER_BOTH("LLC", "Ld Miss"),
+ .name = "ld_llcmiss",
+ .cmp = ld_llcmiss_cmp,
+ .entry = ld_llcmiss_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_tot_recs = {
+ .header = HEADER_BOTH("Total", "records"),
+ .name = "tot_recs",
+ .cmp = tot_recs_cmp,
+ .entry = tot_recs_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_tot_loads = {
+ .header = HEADER_BOTH("Total", "Loads"),
+ .name = "tot_loads",
+ .cmp = tot_loads_cmp,
+ .entry = tot_loads_entry,
+ .width = 7,
+};
+
+static struct c2c_header percent_hitm_header[] = {
+ [DISPLAY_LCL] = HEADER_BOTH("Lcl", "Hitm"),
+ [DISPLAY_RMT] = HEADER_BOTH("Rmt", "Hitm"),
+ [DISPLAY_TOT] = HEADER_BOTH("Tot", "Hitm"),
+};
+
+static struct c2c_dimension dim_percent_hitm = {
+ .name = "percent_hitm",
+ .cmp = percent_hitm_cmp,
+ .entry = percent_hitm_entry,
+ .color = percent_hitm_color,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_percent_rmt_hitm = {
+ .header = HEADER_SPAN("----- HITM -----", "Rmt", 1),
+ .name = "percent_rmt_hitm",
+ .cmp = percent_rmt_hitm_cmp,
+ .entry = percent_rmt_hitm_entry,
+ .color = percent_rmt_hitm_color,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_percent_lcl_hitm = {
+ .header = HEADER_SPAN_LOW("Lcl"),
+ .name = "percent_lcl_hitm",
+ .cmp = percent_lcl_hitm_cmp,
+ .entry = percent_lcl_hitm_entry,
+ .color = percent_lcl_hitm_color,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_percent_stores_l1hit = {
+ .header = HEADER_SPAN("-- Store Refs --", "L1 Hit", 1),
+ .name = "percent_stores_l1hit",
+ .cmp = percent_stores_l1hit_cmp,
+ .entry = percent_stores_l1hit_entry,
+ .color = percent_stores_l1hit_color,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_percent_stores_l1miss = {
+ .header = HEADER_SPAN_LOW("L1 Miss"),
+ .name = "percent_stores_l1miss",
+ .cmp = percent_stores_l1miss_cmp,
+ .entry = percent_stores_l1miss_entry,
+ .color = percent_stores_l1miss_color,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_dram_lcl = {
+ .header = HEADER_SPAN("--- Load Dram ----", "Lcl", 1),
+ .name = "dram_lcl",
+ .cmp = lcl_dram_cmp,
+ .entry = lcl_dram_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_dram_rmt = {
+ .header = HEADER_SPAN_LOW("Rmt"),
+ .name = "dram_rmt",
+ .cmp = rmt_dram_cmp,
+ .entry = rmt_dram_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_pid = {
+ .header = HEADER_LOW("Pid"),
+ .name = "pid",
+ .cmp = pid_cmp,
+ .entry = pid_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_tid = {
+ .header = HEADER_LOW("Tid"),
+ .name = "tid",
+ .se = &sort_thread,
+};
+
+static struct c2c_dimension dim_symbol = {
+ .name = "symbol",
+ .se = &sort_sym,
+};
+
+static struct c2c_dimension dim_dso = {
+ .header = HEADER_BOTH("Shared", "Object"),
+ .name = "dso",
+ .se = &sort_dso,
+};
+
+static struct c2c_header header_node[3] = {
+ HEADER_LOW("Node"),
+ HEADER_LOW("Node{cpus %hitms %stores}"),
+ HEADER_LOW("Node{cpu list}"),
+};
+
+static struct c2c_dimension dim_node = {
+ .name = "node",
+ .cmp = empty_cmp,
+ .entry = node_entry,
+ .width = 4,
+};
+
+static struct c2c_dimension dim_mean_rmt = {
+ .header = HEADER_SPAN("---------- cycles ----------", "rmt hitm", 2),
+ .name = "mean_rmt",
+ .cmp = empty_cmp,
+ .entry = mean_rmt_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_mean_lcl = {
+ .header = HEADER_SPAN_LOW("lcl hitm"),
+ .name = "mean_lcl",
+ .cmp = empty_cmp,
+ .entry = mean_lcl_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_mean_load = {
+ .header = HEADER_SPAN_LOW("load"),
+ .name = "mean_load",
+ .cmp = empty_cmp,
+ .entry = mean_load_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_cpucnt = {
+ .header = HEADER_BOTH("cpu", "cnt"),
+ .name = "cpucnt",
+ .cmp = empty_cmp,
+ .entry = cpucnt_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_srcline = {
+ .name = "cl_srcline",
+ .se = &sort_srcline,
+};
+
+static struct c2c_dimension dim_dcacheline_idx = {
+ .header = HEADER_LOW("Index"),
+ .name = "cl_idx",
+ .cmp = empty_cmp,
+ .entry = cl_idx_entry,
+ .width = 5,
+};
+
+static struct c2c_dimension dim_dcacheline_num = {
+ .header = HEADER_LOW("Num"),
+ .name = "cl_num",
+ .cmp = empty_cmp,
+ .entry = cl_idx_entry,
+ .width = 5,
+};
+
+static struct c2c_dimension dim_dcacheline_num_empty = {
+ .header = HEADER_LOW("Num"),
+ .name = "cl_num_empty",
+ .cmp = empty_cmp,
+ .entry = cl_idx_empty_entry,
+ .width = 5,
+};
+
+static struct c2c_dimension *dimensions[] = {
+ &dim_dcacheline,
+ &dim_offset,
+ &dim_iaddr,
+ &dim_tot_hitm,
+ &dim_lcl_hitm,
+ &dim_rmt_hitm,
+ &dim_cl_lcl_hitm,
+ &dim_cl_rmt_hitm,
+ &dim_stores,
+ &dim_stores_l1hit,
+ &dim_stores_l1miss,
+ &dim_cl_stores_l1hit,
+ &dim_cl_stores_l1miss,
+ &dim_ld_fbhit,
+ &dim_ld_l1hit,
+ &dim_ld_l2hit,
+ &dim_ld_llchit,
+ &dim_ld_rmthit,
+ &dim_ld_llcmiss,
+ &dim_tot_recs,
+ &dim_tot_loads,
+ &dim_percent_hitm,
+ &dim_percent_rmt_hitm,
+ &dim_percent_lcl_hitm,
+ &dim_percent_stores_l1hit,
+ &dim_percent_stores_l1miss,
+ &dim_dram_lcl,
+ &dim_dram_rmt,
+ &dim_pid,
+ &dim_tid,
+ &dim_symbol,
+ &dim_dso,
+ &dim_node,
+ &dim_mean_rmt,
+ &dim_mean_lcl,
+ &dim_mean_load,
+ &dim_cpucnt,
+ &dim_srcline,
+ &dim_dcacheline_idx,
+ &dim_dcacheline_num,
+ &dim_dcacheline_num_empty,
+ NULL,
+};
+
+static void fmt_free(struct perf_hpp_fmt *fmt)
+{
+ struct c2c_fmt *c2c_fmt;
+
+ c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
+ free(c2c_fmt);
+}
+
+static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+ struct c2c_fmt *c2c_a = container_of(a, struct c2c_fmt, fmt);
+ struct c2c_fmt *c2c_b = container_of(b, struct c2c_fmt, fmt);
+
+ return c2c_a->dim == c2c_b->dim;
+}
+
+static struct c2c_dimension *get_dimension(const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; dimensions[i]; i++) {
+ struct c2c_dimension *dim = dimensions[i];
+
+ if (!strcmp(dim->name, name))
+ return dim;
+ }
+
+ return NULL;
+}
+
+static int c2c_se_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct c2c_fmt *c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
+ struct c2c_dimension *dim = c2c_fmt->dim;
+ size_t len = fmt->user_len;
+
+ if (!len) {
+ len = hists__col_len(he->hists, dim->se->se_width_idx);
+
+ if (dim == &dim_symbol || dim == &dim_srcline)
+ len = symbol_width(he->hists, dim->se);
+ }
+
+ return dim->se->se_snprintf(he, hpp->buf, hpp->size, len);
+}
+
+static int64_t c2c_se_cmp(struct perf_hpp_fmt *fmt,
+ struct hist_entry *a, struct hist_entry *b)
+{
+ struct c2c_fmt *c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
+ struct c2c_dimension *dim = c2c_fmt->dim;
+
+ return dim->se->se_cmp(a, b);
+}
+
+static int64_t c2c_se_collapse(struct perf_hpp_fmt *fmt,
+ struct hist_entry *a, struct hist_entry *b)
+{
+ struct c2c_fmt *c2c_fmt = container_of(fmt, struct c2c_fmt, fmt);
+ struct c2c_dimension *dim = c2c_fmt->dim;
+ int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
+
+ collapse_fn = dim->se->se_collapse ?: dim->se->se_cmp;
+ return collapse_fn(a, b);
+}
+
+static struct c2c_fmt *get_format(const char *name)
+{
+ struct c2c_dimension *dim = get_dimension(name);
+ struct c2c_fmt *c2c_fmt;
+ struct perf_hpp_fmt *fmt;
+
+ if (!dim)
+ return NULL;
+
+ c2c_fmt = zalloc(sizeof(*c2c_fmt));
+ if (!c2c_fmt)
+ return NULL;
+
+ c2c_fmt->dim = dim;
+
+ fmt = &c2c_fmt->fmt;
+ INIT_LIST_HEAD(&fmt->list);
+ INIT_LIST_HEAD(&fmt->sort_list);
+
+ fmt->cmp = dim->se ? c2c_se_cmp : dim->cmp;
+ fmt->sort = dim->se ? c2c_se_cmp : dim->cmp;
+ fmt->color = dim->se ? NULL : dim->color;
+ fmt->entry = dim->se ? c2c_se_entry : dim->entry;
+ fmt->header = c2c_header;
+ fmt->width = c2c_width;
+ fmt->collapse = dim->se ? c2c_se_collapse : dim->cmp;
+ fmt->equal = fmt_equal;
+ fmt->free = fmt_free;
+
+ return c2c_fmt;
+}
+
+static int c2c_hists__init_output(struct perf_hpp_list *hpp_list, char *name)
+{
+ struct c2c_fmt *c2c_fmt = get_format(name);
+
+ if (!c2c_fmt) {
+ reset_dimensions();
+ return output_field_add(hpp_list, name);
+ }
+
+ perf_hpp_list__column_register(hpp_list, &c2c_fmt->fmt);
+ return 0;
+}
+
+static int c2c_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
+{
+ struct c2c_fmt *c2c_fmt = get_format(name);
+ struct c2c_dimension *dim;
+
+ if (!c2c_fmt) {
+ reset_dimensions();
+ return sort_dimension__add(hpp_list, name, NULL, 0);
+ }
+
+ dim = c2c_fmt->dim;
+ if (dim == &dim_dso)
+ hpp_list->dso = 1;
+
+ perf_hpp_list__register_sort_field(hpp_list, &c2c_fmt->fmt);
+ return 0;
+}
+
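+/* Tokenize a comma separated list of keys and register each one via _fn(hpp_list, tok). */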
+#define PARSE_LIST(_list, _fn) \
+ do { \
+ char *tmp, *tok; \
+ ret = 0; \
+ \
+ if (!_list) \
+ break; \
+ \
+ for (tok = strtok_r((char *)_list, ", ", &tmp); \
+ tok; tok = strtok_r(NULL, ", ", &tmp)) { \
+ ret = _fn(hpp_list, tok); \
+ if (ret == -EINVAL) { \
+ error("Invalid --fields key: `%s'", tok); \
+ break; \
+ } else if (ret == -ESRCH) { \
+ error("Unknown --fields key: `%s'", tok); \
+ break; \
+ } \
+ } \
+ } while (0)
+
+static int hpp_list__parse(struct perf_hpp_list *hpp_list,
+ const char *output_,
+ const char *sort_)
+{
+ char *output = output_ ? strdup(output_) : NULL;
+ char *sort = sort_ ? strdup(sort_) : NULL;
+ int ret;
+
+ PARSE_LIST(output, c2c_hists__init_output);
+ PARSE_LIST(sort, c2c_hists__init_sort);
+
+ /* copy sort keys to output fields */
+ perf_hpp__setup_output_field(hpp_list);
+
+ /*
+ * We don't need any sorting keys other than those
+ * we already specified. It also really slows down
+ * the processing a lot with a big number of output
+ * fields, so switch this off for c2c.
+ */
+
+#if 0
+ /* and then copy output fields to sort keys */
+ perf_hpp__append_sort_keys(&hists->list);
+#endif
+
+ free(output);
+ free(sort);
+ return ret;
+}
+
+static int c2c_hists__init(struct c2c_hists *hists,
+ const char *sort,
+ int nr_header_lines)
+{
+ __hists__init(&hists->hists, &hists->list);
+
+ /*
+ * Initialize only with sort fields, we need to resort
+ * later anyway, and that's where we add output fields
+ * as well.
+ */
+ perf_hpp_list__init(&hists->list);
+
+ /* Overload the number of header lines. */
+ hists->list.nr_header_lines = nr_header_lines;
+
+ return hpp_list__parse(&hists->list, NULL, sort);
+}
+
+__maybe_unused
+static int c2c_hists__reinit(struct c2c_hists *c2c_hists,
+ const char *output,
+ const char *sort)
+{
+ perf_hpp__reset_output_field(&c2c_hists->list);
+ return hpp_list__parse(&c2c_hists->list, output, sort);
+}
+
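+/* Entries contributing less than this fraction of the displayed HITMs are filtered out. */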
+#define DISPLAY_LINE_LIMIT 0.0005
+
+static bool he__display(struct hist_entry *he, struct c2c_stats *stats)
+{
+ struct c2c_hist_entry *c2c_he;
+ double ld_dist;
+
+ if (c2c.show_all)
+ return true;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+
+#define FILTER_HITM(__h) \
+ if (stats->__h) { \
+ ld_dist = ((double)c2c_he->stats.__h / stats->__h); \
+ if (ld_dist < DISPLAY_LINE_LIMIT) \
+ he->filtered = HIST_FILTER__C2C; \
+ } else { \
+ he->filtered = HIST_FILTER__C2C; \
+ }
+
+ switch (c2c.display) {
+ case DISPLAY_LCL:
+ FILTER_HITM(lcl_hitm);
+ break;
+ case DISPLAY_RMT:
+ FILTER_HITM(rmt_hitm);
+ break;
+ case DISPLAY_TOT:
+ FILTER_HITM(tot_hitm);
+ default:
+ break;
+ }
+
+#undef FILTER_HITM
+
+ return he->filtered == 0;
+}
+
+static inline int valid_hitm_or_store(struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ bool has_hitm;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ has_hitm = c2c.display == DISPLAY_TOT ? c2c_he->stats.tot_hitm :
+ c2c.display == DISPLAY_LCL ? c2c_he->stats.lcl_hitm :
+ c2c_he->stats.rmt_hitm;
+ return has_hitm || c2c_he->stats.store;
+}
+
+static void calc_width(struct hist_entry *he)
+{
+ struct c2c_hists *c2c_hists;
+
+ c2c_hists = container_of(he->hists, struct c2c_hists, hists);
+ hists__calc_col_len(&c2c_hists->hists, he);
+}
+
+static int filter_cb(struct hist_entry *he)
+{
+ if (c2c.show_src && !he->srcline)
+ he->srcline = hist_entry__get_srcline(he);
+
+ calc_width(he);
+
+ if (!valid_hitm_or_store(he))
+ he->filtered = HIST_FILTER__C2C;
+
+ return 0;
+}
+
+static int resort_cl_cb(struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ struct c2c_hists *c2c_hists;
+ bool display = he__display(he, &c2c.hitm_stats);
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ c2c_hists = c2c_he->hists;
+
+ calc_width(he);
+
+ if (display && c2c_hists) {
+ static unsigned int idx;
+
+ c2c_he->cacheline_idx = idx++;
+
+ c2c_hists__reinit(c2c_hists, c2c.cl_output, c2c.cl_resort);
+
+ hists__collapse_resort(&c2c_hists->hists, NULL);
+ hists__output_resort_cb(&c2c_hists->hists, NULL, filter_cb);
+ }
+
+ return 0;
+}
+
+static void setup_nodes_header(void)
+{
+ dim_node.header = header_node[c2c.node_info];
+}
+
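+/* Build a cpu bitmap per NUMA node and a cpu -> node lookup table from the session header. */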
+static int setup_nodes(struct perf_session *session)
+{
+ struct numa_node *n;
+ unsigned long **nodes;
+ int node, cpu;
+ int *cpu2node;
+
+ if (c2c.node_info > 2)
+ c2c.node_info = 2;
+
+ c2c.nodes_cnt = session->header.env.nr_numa_nodes;
+ c2c.cpus_cnt = session->header.env.nr_cpus_online;
+
+ n = session->header.env.numa_nodes;
+ if (!n)
+ return -EINVAL;
+
+ nodes = zalloc(sizeof(unsigned long *) * c2c.nodes_cnt);
+ if (!nodes)
+ return -ENOMEM;
+
+ c2c.nodes = nodes;
+
+ cpu2node = zalloc(sizeof(int) * c2c.cpus_cnt);
+ if (!cpu2node)
+ return -ENOMEM;
+
+ for (cpu = 0; cpu < c2c.cpus_cnt; cpu++)
+ cpu2node[cpu] = -1;
+
+ c2c.cpu2node = cpu2node;
+
+ for (node = 0; node < c2c.nodes_cnt; node++) {
+ struct cpu_map *map = n[node].map;
+ unsigned long *set;
+
+ set = bitmap_alloc(c2c.cpus_cnt);
+ if (!set)
+ return -ENOMEM;
+
+ for (cpu = 0; cpu < map->nr; cpu++) {
+ set_bit(map->map[cpu], set);
+
+ if (WARN_ONCE(cpu2node[map->map[cpu]] != -1, "node/cpu topology bug"))
+ return -EINVAL;
+
+ cpu2node[map->map[cpu]] = node;
+ }
+
+ nodes[node] = set;
+ }
+
+ setup_nodes_header();
+ return 0;
+}
+
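+/* A cacheline is accounted as shared once it shows any local or remote HITM. */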
+#define HAS_HITMS(__h) ((__h)->stats.lcl_hitm || (__h)->stats.rmt_hitm)
+
+static int resort_hitm_cb(struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+
+ if (HAS_HITMS(c2c_he)) {
+ c2c.shared_clines++;
+ c2c_add_stats(&c2c.hitm_stats, &c2c_he->stats);
+ }
+
+ return 0;
+}
+
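+/* Walk all sorted hist entries and invoke cb on each, stopping on the first error. */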
+static int hists__iterate_cb(struct hists *hists, hists__resort_cb_t cb)
+{
+ struct rb_node *next = rb_first(&hists->entries);
+ int ret = 0;
+
+ while (next) {
+ struct hist_entry *he;
+
+ he = rb_entry(next, struct hist_entry, rb_node);
+ ret = cb(he);
+ if (ret)
+ break;
+ next = rb_next(&he->rb_node);
+ }
+
+ return ret;
+}
+
+static void print_c2c__display_stats(FILE *out)
+{
+ int llc_misses;
+ struct c2c_stats *stats = &c2c.hists.stats;
+
+ llc_misses = stats->lcl_dram +
+ stats->rmt_dram +
+ stats->rmt_hit +
+ stats->rmt_hitm;
+
+ fprintf(out, "=================================================\n");
+ fprintf(out, " Trace Event Information \n");
+ fprintf(out, "=================================================\n");
+ fprintf(out, " Total records : %10d\n", stats->nr_entries);
+ fprintf(out, " Locked Load/Store Operations : %10d\n", stats->locks);
+ fprintf(out, " Load Operations : %10d\n", stats->load);
+ fprintf(out, " Loads - uncacheable : %10d\n", stats->ld_uncache);
+ fprintf(out, " Loads - IO : %10d\n", stats->ld_io);
+ fprintf(out, " Loads - Miss : %10d\n", stats->ld_miss);
+ fprintf(out, " Loads - no mapping : %10d\n", stats->ld_noadrs);
+ fprintf(out, " Load Fill Buffer Hit : %10d\n", stats->ld_fbhit);
+ fprintf(out, " Load L1D hit : %10d\n", stats->ld_l1hit);
+ fprintf(out, " Load L2D hit : %10d\n", stats->ld_l2hit);
+ fprintf(out, " Load LLC hit : %10d\n", stats->ld_llchit + stats->lcl_hitm);
+ fprintf(out, " Load Local HITM : %10d\n", stats->lcl_hitm);
+ fprintf(out, " Load Remote HITM : %10d\n", stats->rmt_hitm);
+ fprintf(out, " Load Remote HIT : %10d\n", stats->rmt_hit);
+ fprintf(out, " Load Local DRAM : %10d\n", stats->lcl_dram);
+ fprintf(out, " Load Remote DRAM : %10d\n", stats->rmt_dram);
+ fprintf(out, " Load MESI State Exclusive : %10d\n", stats->ld_excl);
+ fprintf(out, " Load MESI State Shared : %10d\n", stats->ld_shared);
+ fprintf(out, " Load LLC Misses : %10d\n", llc_misses);
+ fprintf(out, " LLC Misses to Local DRAM : %10.1f%%\n", ((double)stats->lcl_dram/(double)llc_misses) * 100.);
+ fprintf(out, " LLC Misses to Remote DRAM : %10.1f%%\n", ((double)stats->rmt_dram/(double)llc_misses) * 100.);
+ fprintf(out, " LLC Misses to Remote cache (HIT) : %10.1f%%\n", ((double)stats->rmt_hit /(double)llc_misses) * 100.);
+ fprintf(out, " LLC Misses to Remote cache (HITM) : %10.1f%%\n", ((double)stats->rmt_hitm/(double)llc_misses) * 100.);
+ fprintf(out, " Store Operations : %10d\n", stats->store);
+ fprintf(out, " Store - uncacheable : %10d\n", stats->st_uncache);
+ fprintf(out, " Store - no mapping : %10d\n", stats->st_noadrs);
+ fprintf(out, " Store L1D Hit : %10d\n", stats->st_l1hit);
+ fprintf(out, " Store L1D Miss : %10d\n", stats->st_l1miss);
+ fprintf(out, " No Page Map Rejects : %10d\n", stats->nomap);
+ fprintf(out, " Unable to parse data source : %10d\n", stats->noparse);
+}
+
+static void print_shared_cacheline_info(FILE *out)
+{
+ struct c2c_stats *stats = &c2c.hitm_stats;
+ int hitm_cnt = stats->lcl_hitm + stats->rmt_hitm;
+
+ fprintf(out, "=================================================\n");
+ fprintf(out, " Global Shared Cache Line Event Information \n");
+ fprintf(out, "=================================================\n");
+ fprintf(out, " Total Shared Cache Lines : %10d\n", c2c.shared_clines);
+ fprintf(out, " Load HITs on shared lines : %10d\n", stats->load);
+ fprintf(out, " Fill Buffer Hits on shared lines : %10d\n", stats->ld_fbhit);
+ fprintf(out, " L1D hits on shared lines : %10d\n", stats->ld_l1hit);
+ fprintf(out, " L2D hits on shared lines : %10d\n", stats->ld_l2hit);
+ fprintf(out, " LLC hits on shared lines : %10d\n", stats->ld_llchit + stats->lcl_hitm);
+ fprintf(out, " Locked Access on shared lines : %10d\n", stats->locks);
+ fprintf(out, " Store HITs on shared lines : %10d\n", stats->store);
+ fprintf(out, " Store L1D hits on shared lines : %10d\n", stats->st_l1hit);
+ fprintf(out, " Total Merged records : %10d\n", hitm_cnt + stats->store);
+}
+
+static void print_cacheline(struct c2c_hists *c2c_hists,
+ struct hist_entry *he_cl,
+ struct perf_hpp_list *hpp_list,
+ FILE *out)
+{
+ char bf[1000];
+ struct perf_hpp hpp = {
+ .buf = bf,
+ .size = 1000,
+ };
+ static bool once;
+
+ if (!once) {
+ hists__fprintf_headers(&c2c_hists->hists, out);
+ once = true;
+ } else {
+ fprintf(out, "\n");
+ }
+
+ fprintf(out, " -------------------------------------------------------------\n");
+ __hist_entry__snprintf(he_cl, &hpp, hpp_list);
+ fprintf(out, "%s\n", bf);
+ fprintf(out, " -------------------------------------------------------------\n");
+
+ hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, true);
+}
+
+static void print_pareto(FILE *out)
+{
+ struct perf_hpp_list hpp_list;
+ struct rb_node *nd;
+ int ret;
+
+ perf_hpp_list__init(&hpp_list);
+ ret = hpp_list__parse(&hpp_list,
+ "cl_num,"
+ "cl_rmt_hitm,"
+ "cl_lcl_hitm,"
+ "cl_stores_l1hit,"
+ "cl_stores_l1miss,"
+ "dcacheline",
+ NULL);
+
+ if (WARN_ONCE(ret, "failed to setup sort entries\n"))
+ return;
+
+ nd = rb_first(&c2c.hists.hists.entries);
+
+ for (; nd; nd = rb_next(nd)) {
+ struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
+ struct c2c_hist_entry *c2c_he;
+
+ if (he->filtered)
+ continue;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ print_cacheline(c2c_he->hists, he, &hpp_list, out);
+ }
+}
+
+static void print_c2c_info(FILE *out, struct perf_session *session)
+{
+ struct perf_evlist *evlist = session->evlist;
+ struct perf_evsel *evsel;
+ bool first = true;
+
+ fprintf(out, "=================================================\n");
+ fprintf(out, " c2c details \n");
+ fprintf(out, "=================================================\n");
+
+ evlist__for_each_entry(evlist, evsel) {
+ fprintf(out, "%-36s: %s\n", first ? " Events" : "",
+ perf_evsel__name(evsel));
+ first = false;
+ }
+ fprintf(out, " Cachelines sort on : %s HITMs\n",
+ display_str[c2c.display]);
+ fprintf(out, " Cacheline data grouping : %s\n", c2c.cl_sort);
+}
+
+static void perf_c2c__hists_fprintf(FILE *out, struct perf_session *session)
+{
+ setup_pager();
+
+ print_c2c__display_stats(out);
+ fprintf(out, "\n");
+ print_shared_cacheline_info(out);
+ fprintf(out, "\n");
+ print_c2c_info(out, session);
+
+ if (c2c.stats_only)
+ return;
+
+ fprintf(out, "\n");
+ fprintf(out, "=================================================\n");
+ fprintf(out, " Shared Data Cache Line Table \n");
+ fprintf(out, "=================================================\n");
+ fprintf(out, "#\n");
+
+ hists__fprintf(&c2c.hists.hists, true, 0, 0, 0, stdout, false);
+
+ fprintf(out, "\n");
+ fprintf(out, "=================================================\n");
+ fprintf(out, " Shared Cache Line Distribution Pareto \n");
+ fprintf(out, "=================================================\n");
+ fprintf(out, "#\n");
+
+ print_pareto(out);
+}
+
+#ifdef HAVE_SLANG_SUPPORT
+static void c2c_browser__update_nr_entries(struct hist_browser *hb)
+{
+ u64 nr_entries = 0;
+ struct rb_node *nd = rb_first(&hb->hists->entries);
+
+ while (nd) {
+ struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
+
+ if (!he->filtered)
+ nr_entries++;
+
+ nd = rb_next(nd);
+ }
+
+ hb->nr_non_filtered_entries = nr_entries;
+}
+
+struct c2c_cacheline_browser {
+ struct hist_browser hb;
+ struct hist_entry *he;
+};
+
+static int
+perf_c2c_cacheline_browser__title(struct hist_browser *browser,
+ char *bf, size_t size)
+{
+ struct c2c_cacheline_browser *cl_browser;
+ struct hist_entry *he;
+ uint64_t addr = 0;
+
+ cl_browser = container_of(browser, struct c2c_cacheline_browser, hb);
+ he = cl_browser->he;
+
+ if (he->mem_info)
+ addr = cl_address(he->mem_info->daddr.addr);
+
+ scnprintf(bf, size, "Cacheline 0x%lx", addr);
+ return 0;
+}
+
+static struct c2c_cacheline_browser*
+c2c_cacheline_browser__new(struct hists *hists, struct hist_entry *he)
+{
+ struct c2c_cacheline_browser *browser;
+
+ browser = zalloc(sizeof(*browser));
+ if (browser) {
+ hist_browser__init(&browser->hb, hists);
+ browser->hb.c2c_filter = true;
+ browser->hb.title = perf_c2c_cacheline_browser__title;
+ browser->he = he;
+ }
+
+ return browser;
+}
+
+static int perf_c2c__browse_cacheline(struct hist_entry *he)
+{
+ struct c2c_hist_entry *c2c_he;
+ struct c2c_hists *c2c_hists;
+ struct c2c_cacheline_browser *cl_browser;
+ struct hist_browser *browser;
+ int key = -1;
+ const char help[] =
+ " ENTER Togle callchains (if present) \n"
+ " n Togle Node details info \n"
+ " s Togle full lenght of symbol and source line columns \n"
+ " q Return back to cacheline list \n";
+
+ /* Display compact version first. */
+ c2c.symbol_full = false;
+
+ c2c_he = container_of(he, struct c2c_hist_entry, he);
+ c2c_hists = c2c_he->hists;
+
+ cl_browser = c2c_cacheline_browser__new(&c2c_hists->hists, he);
+ if (cl_browser == NULL)
+ return -1;
+
+ browser = &cl_browser->hb;
+
+ /* reset abort key so that it can get Ctrl-C as a key */
+ SLang_reset_tty();
+ SLang_init_tty(0, 0, 0);
+
+ c2c_browser__update_nr_entries(browser);
+
+ while (1) {
+ key = hist_browser__run(browser, "? - help");
+
+ switch (key) {
+ case 's':
+ c2c.symbol_full = !c2c.symbol_full;
+ break;
+ case 'n':
+ c2c.node_info = (c2c.node_info + 1) % 3;
+ setup_nodes_header();
+ break;
+ case 'q':
+ goto out;
+ case '?':
+ ui_browser__help_window(&browser->b, help);
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ free(cl_browser);
+ return 0;
+}
+
+static int perf_c2c_browser__title(struct hist_browser *browser,
+ char *bf, size_t size)
+{
+ scnprintf(bf, size,
+ "Shared Data Cache Line Table "
+ "(%lu entries, sorted on %s HITMs)",
+ browser->nr_non_filtered_entries,
+ display_str[c2c.display]);
+ return 0;
+}
+
+static struct hist_browser*
+perf_c2c_browser__new(struct hists *hists)
+{
+ struct hist_browser *browser = hist_browser__new(hists);
+
+ if (browser) {
+ browser->title = perf_c2c_browser__title;
+ browser->c2c_filter = true;
+ }
+
+ return browser;
+}
+
+static int perf_c2c__hists_browse(struct hists *hists)
+{
+ struct hist_browser *browser;
+ int key = -1;
+ const char help[] =
+ " d Display cacheline details \n"
+ " ENTER Togle callchains (if present) \n"
+ " q Quit \n";
+
+ browser = perf_c2c_browser__new(hists);
+ if (browser == NULL)
+ return -1;
+
+ /* reset abort key so that it can get Ctrl-C as a key */
+ SLang_reset_tty();
+ SLang_init_tty(0, 0, 0);
+
+ c2c_browser__update_nr_entries(browser);
+
+ while (1) {
+ key = hist_browser__run(browser, "? - help");
+
+ switch (key) {
+ case 'q':
+ goto out;
+ case 'd':
+ perf_c2c__browse_cacheline(browser->he_selection);
+ break;
+ case '?':
+ ui_browser__help_window(&browser->b, help);
+ break;
+ default:
+ break;
+ }
+ }
+
+out:
+ hist_browser__delete(browser);
+ return 0;
+}
+
+static void perf_c2c_display(struct perf_session *session)
+{
+ if (c2c.use_stdio)
+ perf_c2c__hists_fprintf(stdout, session);
+ else
+ perf_c2c__hists_browse(&c2c.hists.hists);
+}
+#else
+static void perf_c2c_display(struct perf_session *session)
+{
+ use_browser = 0;
+ perf_c2c__hists_fprintf(stdout, session);
+}
+#endif /* HAVE_SLANG_SUPPORT */
+
+static void ui_quirks(void)
+{
+ if (!c2c.use_stdio) {
+ dim_offset.width = 5;
+ dim_offset.header = header_offset_tui;
+ }
+
+ dim_percent_hitm.header = percent_hitm_header[c2c.display];
+}
+
+#define CALLCHAIN_DEFAULT_OPT "graph,0.5,caller,function,percent"
+
+const char callchain_help[] = "Display call graph (stack chain/backtrace):\n\n"
+ CALLCHAIN_REPORT_HELP
+ "\n\t\t\t\tDefault: " CALLCHAIN_DEFAULT_OPT;
+
+static int
+parse_callchain_opt(const struct option *opt, const char *arg, int unset)
+{
+ struct callchain_param *callchain = opt->value;
+
+ callchain->enabled = !unset;
+ /*
+ * --no-call-graph
+ */
+ if (unset) {
+ symbol_conf.use_callchain = false;
+ callchain->mode = CHAIN_NONE;
+ return 0;
+ }
+
+ return parse_callchain_report_opt(arg);
+}
+
+static int setup_callchain(struct perf_evlist *evlist)
+{
+ u64 sample_type = perf_evlist__combined_sample_type(evlist);
+ enum perf_call_graph_mode mode = CALLCHAIN_NONE;
+
+ if ((sample_type & PERF_SAMPLE_REGS_USER) &&
+ (sample_type & PERF_SAMPLE_STACK_USER))
+ mode = CALLCHAIN_DWARF;
+ else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ mode = CALLCHAIN_LBR;
+ else if (sample_type & PERF_SAMPLE_CALLCHAIN)
+ mode = CALLCHAIN_FP;
+
+ if (!callchain_param.enabled &&
+ callchain_param.mode != CHAIN_NONE &&
+ mode != CALLCHAIN_NONE) {
+ symbol_conf.use_callchain = true;
+ if (callchain_register_param(&callchain_param) < 0) {
+ ui__error("Can't register callchain params.\n");
+ return -EINVAL;
+ }
+ }
+
+ callchain_param.record_mode = mode;
+ callchain_param.min_percent = 0;
+ return 0;
+}
+
+static int setup_display(const char *str)
+{
+ const char *display = str ?: "tot";
+
+ if (!strcmp(display, "tot"))
+ c2c.display = DISPLAY_TOT;
+ else if (!strcmp(display, "rmt"))
+ c2c.display = DISPLAY_RMT;
+ else if (!strcmp(display, "lcl"))
+ c2c.display = DISPLAY_LCL;
+ else {
+ pr_err("failed: unknown display type: %s\n", str);
+ return -1;
+ }
+
+ return 0;
+}
+
+#define for_each_token(__tok, __buf, __sep, __tmp) \
+ for (__tok = strtok_r(__buf, __sep, &__tmp); __tok; \
+ __tok = strtok_r(NULL, __sep, &__tmp))
+
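+/* Translate the coalesce/sort string into the per-cacheline output field list. */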
+static int build_cl_output(char *cl_sort, bool no_source)
+{
+ char *tok, *tmp, *buf = strdup(cl_sort);
+ bool add_pid = false;
+ bool add_tid = false;
+ bool add_iaddr = false;
+ bool add_sym = false;
+ bool add_dso = false;
+ bool add_src = false;
+
+ if (!buf)
+ return -ENOMEM;
+
+ for_each_token(tok, buf, ",", tmp) {
+ if (!strcmp(tok, "tid")) {
+ add_tid = true;
+ } else if (!strcmp(tok, "pid")) {
+ add_pid = true;
+ } else if (!strcmp(tok, "iaddr")) {
+ add_iaddr = true;
+ add_sym = true;
+ add_dso = true;
+ add_src = !no_source;
+ } else if (!strcmp(tok, "dso")) {
+ add_dso = true;
+ } else if (strcmp(tok, "offset")) {
+ pr_err("unrecognized sort token: %s\n", tok);
+ return -EINVAL;
+ }
+ }
+
+ if (asprintf(&c2c.cl_output,
+ "%s%s%s%s%s%s%s%s%s%s",
+ c2c.use_stdio ? "cl_num_empty," : "",
+ "percent_rmt_hitm,"
+ "percent_lcl_hitm,"
+ "percent_stores_l1hit,"
+ "percent_stores_l1miss,"
+ "offset,",
+ add_pid ? "pid," : "",
+ add_tid ? "tid," : "",
+ add_iaddr ? "iaddr," : "",
+ "mean_rmt,"
+ "mean_lcl,"
+ "mean_load,"
+ "cpucnt,",
+ add_sym ? "symbol," : "",
+ add_dso ? "dso," : "",
+ add_src ? "cl_srcline," : "",
+ "node") < 0)
+ return -ENOMEM;
+
+ c2c.show_src = add_src;
+
+ free(buf);
+ return 0;
+}
+
+static int setup_coalesce(const char *coalesce, bool no_source)
+{
+ const char *c = coalesce ?: coalesce_default;
+
+ if (asprintf(&c2c.cl_sort, "offset,%s", c) < 0)
+ return -ENOMEM;
+
+ if (build_cl_output(c2c.cl_sort, no_source))
+ return -1;
+
+ if (asprintf(&c2c.cl_resort, "offset,%s",
+ c2c.display == DISPLAY_TOT ?
+ "tot_hitm" :
+ c2c.display == DISPLAY_RMT ?
+ "rmt_hitm,lcl_hitm" :
+ "lcl_hitm,rmt_hitm") < 0)
+ return -ENOMEM;
+
+ pr_debug("coalesce sort fields: %s\n", c2c.cl_sort);
+ pr_debug("coalesce resort fields: %s\n", c2c.cl_resort);
+ pr_debug("coalesce output fields: %s\n", c2c.cl_output);
+ return 0;
+}
+
+static int perf_c2c__report(int argc, const char **argv)
+{
+ struct perf_session *session;
+ struct ui_progress prog;
+ struct perf_data_file file = {
+ .mode = PERF_DATA_MODE_READ,
+ };
+ char callchain_default_opt[] = CALLCHAIN_DEFAULT_OPT;
+ const char *display = NULL;
+ const char *coalesce = NULL;
+ bool no_source = false;
+ const struct option options[] = {
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "the input file to process"),
+ OPT_INCR('N', "node-info", &c2c.node_info,
+ "show extra node info in report (repeat for more info)"),
+#ifdef HAVE_SLANG_SUPPORT
+ OPT_BOOLEAN(0, "stdio", &c2c.use_stdio, "Use the stdio interface"),
+#endif
+ OPT_BOOLEAN(0, "stats", &c2c.stats_only,
+ "Use the stdio interface"),
+ OPT_BOOLEAN(0, "full-symbols", &c2c.symbol_full,
+ "Display full length of symbols"),
+ OPT_BOOLEAN(0, "no-source", &no_source,
+ "Do not display Source Line column"),
+ OPT_BOOLEAN(0, "show-all", &c2c.show_all,
+ "Show all captured HITM lines."),
+ OPT_CALLBACK_DEFAULT('g', "call-graph", &callchain_param,
+ "print_type,threshold[,print_limit],order,sort_key[,branch],value",
+ callchain_help, &parse_callchain_opt,
+ callchain_default_opt),
+ OPT_STRING('d', "display", &display, "Switch HITM output type", "lcl,rmt"),
+ OPT_STRING('c', "coalesce", &coalesce, "coalesce fields",
+ "coalesce fields: pid,tid,iaddr,dso"),
+ OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
+ OPT_PARENT(c2c_options),
+ OPT_END()
+ };
+ int err = 0;
+
+ argc = parse_options(argc, argv, options, report_c2c_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (argc)
+ usage_with_options(report_c2c_usage, options);
+
+ if (c2c.stats_only)
+ c2c.use_stdio = true;
+
+ if (!input_name || !strlen(input_name))
+ input_name = "perf.data";
+
+ file.path = input_name;
+ file.force = symbol_conf.force;
+
+ err = setup_display(display);
+ if (err)
+ goto out;
+
+ err = setup_coalesce(coalesce, no_source);
+ if (err) {
+ pr_debug("Failed to initialize hists\n");
+ goto out;
+ }
+
+ err = c2c_hists__init(&c2c.hists, "dcacheline", 2);
+ if (err) {
+ pr_debug("Failed to initialize hists\n");
+ goto out;
+ }
+
+ session = perf_session__new(&file, 0, &c2c.tool);
+ if (session == NULL) {
+ pr_debug("No memory for session\n");
+ goto out;
+ }
+
+ err = setup_nodes(session);
+ if (err) {
+ pr_err("Failed setup nodes\n");
+ goto out;
+ }
+
+ err = setup_callchain(session->evlist);
+ if (err)
+ goto out_session;
+
+ if (symbol__init(&session->header.env) < 0)
+ goto out_session;
+
+ /* No pipe support at the moment. */
+ if (perf_data_file__is_pipe(session->file)) {
+ pr_debug("No pipe support at the moment.\n");
+ goto out_session;
+ }
+
+ if (c2c.use_stdio)
+ use_browser = 0;
+ else
+ use_browser = 1;
+
+ setup_browser(false);
+
+ err = perf_session__process_events(session);
+ if (err) {
+ pr_err("failed to process sample\n");
+ goto out_session;
+ }
+
+ c2c_hists__reinit(&c2c.hists,
+ "cl_idx,"
+ "dcacheline,"
+ "tot_recs,"
+ "percent_hitm,"
+ "tot_hitm,lcl_hitm,rmt_hitm,"
+ "stores,stores_l1hit,stores_l1miss,"
+ "dram_lcl,dram_rmt,"
+ "ld_llcmiss,"
+ "tot_loads,"
+ "ld_fbhit,ld_l1hit,ld_l2hit,"
+ "ld_lclhit,ld_rmthit",
+ c2c.display == DISPLAY_TOT ? "tot_hitm" :
+ c2c.display == DISPLAY_LCL ? "lcl_hitm" : "rmt_hitm"
+ );
+
+ ui_progress__init(&prog, c2c.hists.hists.nr_entries, "Sorting...");
+
+ hists__collapse_resort(&c2c.hists.hists, NULL);
+ hists__output_resort_cb(&c2c.hists.hists, &prog, resort_hitm_cb);
+ hists__iterate_cb(&c2c.hists.hists, resort_cl_cb);
+
+ ui_progress__finish();
+
+ ui_quirks();
+
+ perf_c2c_display(session);
+
+out_session:
+ perf_session__delete(session);
+out:
+ return err;
+}
+
+static int parse_record_events(const struct option *opt __maybe_unused,
+ const char *str, int unset __maybe_unused)
+{
+ bool *event_set = (bool *) opt->value;
+
+ *event_set = true;
+ return perf_mem_events__parse(str);
+}
+
+static const char * const __usage_record[] = {
+ "perf c2c record [<options>] [<command>]",
+ "perf c2c record [<options>] -- <command> [<options>]",
+ NULL
+};
+
+static const char * const *record_mem_usage = __usage_record;
+
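+/* Set up and forward to 'perf record' with the memory load/store events needed by c2c. */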
+static int perf_c2c__record(int argc, const char **argv)
+{
+ int rec_argc, i = 0, j;
+ const char **rec_argv;
+ int ret;
+ bool all_user = false, all_kernel = false;
+ bool event_set = false;
+ struct option options[] = {
+ OPT_CALLBACK('e', "event", &event_set, "event",
+ "event selector. Use 'perf mem record -e list' to list available events",
+ parse_record_events),
+ OPT_BOOLEAN('u', "all-user", &all_user, "collect only user level data"),
+ OPT_BOOLEAN('k', "all-kernel", &all_kernel, "collect only kernel level data"),
+ OPT_UINTEGER('l', "ldlat", &perf_mem_events__loads_ldlat, "setup mem-loads latency"),
+ OPT_PARENT(c2c_options),
+ OPT_END()
+ };
+
+ if (perf_mem_events__init()) {
+ pr_err("failed: memory events not supported\n");
+ return -1;
+ }
+
+ argc = parse_options(argc, argv, options, record_mem_usage,
+ PARSE_OPT_KEEP_UNKNOWN);
+
+ rec_argc = argc + 10; /* max number of arguments */
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ if (!rec_argv)
+ return -1;
+
+ rec_argv[i++] = "record";
+
+ if (!event_set) {
+ perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true;
+ perf_mem_events[PERF_MEM_EVENTS__STORE].record = true;
+ }
+
+ if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record)
+ rec_argv[i++] = "-W";
+
+ rec_argv[i++] = "-d";
+ rec_argv[i++] = "--sample-cpu";
+
+ for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
+ if (!perf_mem_events[j].record)
+ continue;
+
+ if (!perf_mem_events[j].supported) {
+ pr_err("failed: event '%s' not supported\n",
+ perf_mem_events[j].name);
+ return -1;
+ }
+
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = perf_mem_events__name(j);
+ }
+
+ if (all_user)
+ rec_argv[i++] = "--all-user";
+
+ if (all_kernel)
+ rec_argv[i++] = "--all-kernel";
+
+ for (j = 0; j < argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ if (verbose > 0) {
+ pr_debug("calling: ");
+
+ j = 0;
+
+ while (rec_argv[j]) {
+ pr_debug("%s ", rec_argv[j]);
+ j++;
+ }
+ pr_debug("\n");
+ }
+
+ ret = cmd_record(i, rec_argv, NULL);
+ free(rec_argv);
+ return ret;
+}
+
+int cmd_c2c(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+ argc = parse_options(argc, argv, c2c_options, c2c_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (!argc)
+ usage_with_options(c2c_usage, c2c_options);
+
+ if (!strncmp(argv[0], "rec", 3)) {
+ return perf_c2c__record(argc, argv);
+ } else if (!strncmp(argv[0], "rep", 3)) {
+ return perf_c2c__report(argc, argv);
+ } else {
+ usage_with_options(c2c_usage, c2c_options);
+ }
+
+ return 0;
+}
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index e4207a23b52c..8c0d93b7c2f0 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -17,7 +17,7 @@
static bool use_system_config, use_user_config;
static const char * const config_usage[] = {
- "perf config [<file-option>] [options]",
+ "perf config [<file-option>] [options] [section.name[=value] ...]",
NULL
};
@@ -33,6 +33,73 @@ static struct option config_options[] = {
OPT_END()
};
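+/* Merge var=value into the config set and rewrite file_name with the resulting entries. */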
+static int set_config(struct perf_config_set *set, const char *file_name,
+ const char *var, const char *value)
+{
+ struct perf_config_section *section = NULL;
+ struct perf_config_item *item = NULL;
+ const char *first_line = "# this file is auto-generated.";
+ FILE *fp;
+
+ if (set == NULL)
+ return -1;
+
+ fp = fopen(file_name, "w");
+ if (!fp)
+ return -1;
+
+ perf_config_set__collect(set, file_name, var, value);
+ fprintf(fp, "%s\n", first_line);
+
+ /* overwrite config variables */
+ perf_config_items__for_each_entry(&set->sections, section) {
+ if (!use_system_config && section->from_system_config)
+ continue;
+ fprintf(fp, "[%s]\n", section->name);
+
+ perf_config_items__for_each_entry(&section->items, item) {
+ if (!use_system_config && section->from_system_config)
+ continue;
+ if (item->value)
+ fprintf(fp, "\t%s = %s\n",
+ item->name, item->value);
+ }
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+static int show_spec_config(struct perf_config_set *set, const char *var)
+{
+ struct perf_config_section *section;
+ struct perf_config_item *item;
+
+ if (set == NULL)
+ return -1;
+
+ perf_config_items__for_each_entry(&set->sections, section) {
+ if (prefixcmp(var, section->name) != 0)
+ continue;
+
+ perf_config_items__for_each_entry(&section->items, item) {
+ const char *name = var + strlen(section->name) + 1;
+
+ if (strcmp(name, item->name) == 0) {
+ char *value = item->value;
+
+ if (value) {
+ printf("%s=%s\n", var, value);
+ return 0;
+ }
+ }
+
+ }
+ }
+
+ return 0;
+}
+
static int show_config(struct perf_config_set *set)
{
struct perf_config_section *section;
@@ -52,9 +119,44 @@ static int show_config(struct perf_config_set *set)
return 0;
}
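+/* Split "section.name[=value]" into the variable name and the optional value. */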
+static int parse_config_arg(char *arg, char **var, char **value)
+{
+ const char *last_dot = strchr(arg, '.');
+
+ /*
+ * Since "var" actually contains the section name and the real
+ * config variable name separated by a dot, we have to know where the dot is.
+ */
+ if (last_dot == NULL || last_dot == arg) {
+ pr_err("The config variable does not contain a section name: %s\n", arg);
+ return -1;
+ }
+ if (!last_dot[1]) {
+ pr_err("The config variable does not contain a variable name: %s\n", arg);
+ return -1;
+ }
+
+ *value = strchr(arg, '=');
+ if (*value == NULL)
+ *var = arg;
+ else if (!strcmp(*value, "=")) {
+ pr_err("The config variable does not contain a value: %s\n", arg);
+ return -1;
+ } else {
+ *value = *value + 1; /* skip the leading '=' character */
+ *var = strsep(&arg, "=");
+ if (*var[0] == '\0') {
+ pr_err("invalid config variable: %s\n", arg);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
{
- int ret = 0;
+ int i, ret = 0;
struct perf_config_set *set;
char *user_config = mkpath("%s/.perfconfig", getenv("HOME"));
@@ -100,7 +202,36 @@ int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
}
break;
default:
- usage_with_options(config_usage, config_options);
+ if (argc) {
+ for (i = 0; argv[i]; i++) {
+ char *var, *value;
+ char *arg = strdup(argv[i]);
+
+ if (!arg) {
+ pr_err("%s: strdup failed\n", __func__);
+ ret = -1;
+ break;
+ }
+
+ if (parse_config_arg(arg, &var, &value) < 0) {
+ free(arg);
+ ret = -1;
+ break;
+ }
+
+ if (value == NULL)
+ ret = show_spec_config(set, var);
+ else {
+ const char *config_filename = config_exclusive_filename;
+
+ if (!config_exclusive_filename)
+ config_filename = user_config;
+ ret = set_config(set, config_filename, var, value);
+ }
+ free(arg);
+ }
+ } else
+ usage_with_options(config_usage, config_options);
}
perf_config_set__delete(set);
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index d426dcb18ce9..35a02f8e5a4a 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -11,6 +11,7 @@
#include "util/session.h"
#include "util/tool.h"
#include "util/callchain.h"
+#include "util/time-utils.h"
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
@@ -49,6 +50,7 @@ struct alloc_stat {
u64 ptr;
u64 bytes_req;
u64 bytes_alloc;
+ u64 last_alloc;
u32 hit;
u32 pingpong;
@@ -62,9 +64,13 @@ static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;
-static unsigned long total_requested, total_allocated;
+static unsigned long total_requested, total_allocated, total_freed;
static unsigned long nr_allocs, nr_cross_allocs;
+/* filters for controlling the start and stop time of the analysis */
+static struct perf_time_interval ptime;
+const char *time_str;
+
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
int bytes_req, int bytes_alloc, int cpu)
{
@@ -105,6 +111,8 @@ static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
}
data->call_site = call_site;
data->alloc_cpu = cpu;
+ data->last_alloc = bytes_alloc;
+
return 0;
}
@@ -223,6 +231,8 @@ static int perf_evsel__process_free_event(struct perf_evsel *evsel,
if (!s_alloc)
return 0;
+ total_freed += s_alloc->last_alloc;
+
if ((short)sample->cpu != s_alloc->alloc_cpu) {
s_alloc->pingpong++;
@@ -907,6 +917,15 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
return 0;
}
+static bool perf_kmem__skip_sample(struct perf_sample *sample)
+{
+ /* skip sample based on time? */
+ if (perf_time__skip_sample(&ptime, sample->time))
+ return true;
+
+ return false;
+}
+
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
struct perf_sample *sample);
@@ -926,6 +945,9 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return -1;
}
+ if (perf_kmem__skip_sample(sample))
+ return 0;
+
dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
if (evsel->handler != NULL) {
@@ -1128,6 +1150,11 @@ static void print_slab_summary(void)
printf("\n========================\n");
printf("Total bytes requested: %'lu\n", total_requested);
printf("Total bytes allocated: %'lu\n", total_allocated);
+ printf("Total bytes freed: %'lu\n", total_freed);
+ if (total_allocated > total_freed) {
+ printf("Net total bytes allocated: %'lu\n",
+ total_allocated - total_freed);
+ }
printf("Total bytes wasted on internal fragmentation: %'lu\n",
total_allocated - total_requested);
printf("Internal fragmentation: %f%%\n",
@@ -1884,6 +1911,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
parse_page_opt),
OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
+ OPT_STRING(0, "time", &time_str, "str",
+ "Time span of interest (start,stop)"),
OPT_END()
};
const char *const kmem_subcommands[] = { "record", "stat", NULL };
@@ -1944,6 +1973,11 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
symbol__init(&session->header.env);
+ if (perf_time__parse_str(&ptime, time_str) != 0) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+
if (!strcmp(argv[0], "stat")) {
setlocale(LC_ALL, "");
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 67d2a9003294..fa26865364b6 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -37,6 +37,7 @@
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
+#include "util/perf-hooks.h"
#include "asm/bug.h"
#include <unistd.h>
@@ -206,6 +207,12 @@ static void sig_handler(int sig)
done = 1;
}
+static void sigsegv_handler(int sig)
+{
+ perf_hooks__recover();
+ sighandler_dump_stack(sig);
+}
+
static void record__sig_exit(void)
{
if (signr == -1)
@@ -833,6 +840,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGCHLD, sig_handler);
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
+ signal(SIGSEGV, sigsegv_handler);
if (rec->opts.auxtrace_snapshot_mode || rec->switch_output) {
signal(SIGUSR2, snapshot_sig_handler);
@@ -970,6 +978,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
trigger_ready(&auxtrace_snapshot_trigger);
trigger_ready(&switch_output_trigger);
+ perf_hooks__invoke_record_start();
for (;;) {
unsigned long long hits = rec->samples;
@@ -1114,6 +1123,8 @@ out_child:
}
}
+ perf_hooks__invoke_record_end();
+
if (!err && !quiet) {
char samples[128];
const char *postfix = rec->timestamp_filename ?
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 6e88460cd13d..d2afbe4a240d 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -36,7 +36,7 @@
#include "util/hist.h"
#include "util/data.h"
#include "arch/common.h"
-
+#include "util/time-utils.h"
#include "util/auxtrace.h"
#include <dlfcn.h>
@@ -59,6 +59,8 @@ struct report {
const char *pretty_printing_style;
const char *cpu_list;
const char *symbol_filter_str;
+ const char *time_str;
+ struct perf_time_interval ptime;
float min_percent;
u64 nr_entries;
u64 queue_size;
@@ -158,6 +160,9 @@ static int process_sample_event(struct perf_tool *tool,
};
int ret = 0;
+ if (perf_time__skip_sample(&rep->ptime, sample->time))
+ return 0;
+
if (machine__resolve(machine, &al, sample) < 0) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
@@ -207,11 +212,14 @@ static int process_read_event(struct perf_tool *tool,
if (rep->show_threads) {
const char *name = evsel ? perf_evsel__name(evsel) : "unknown";
- perf_read_values_add_value(&rep->show_threads_values,
+ int err = perf_read_values_add_value(&rep->show_threads_values,
event->read.pid, event->read.tid,
event->read.id,
name,
event->read.value);
+
+ if (err)
+ return err;
}
dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
@@ -539,8 +547,11 @@ static int __cmd_report(struct report *rep)
}
}
- if (rep->show_threads)
- perf_read_values_init(&rep->show_threads_values);
+ if (rep->show_threads) {
+ ret = perf_read_values_init(&rep->show_threads_values);
+ if (ret)
+ return ret;
+ }
ret = report__setup_sample_type(rep);
if (ret) {
@@ -824,6 +835,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
"'always' (default), 'never' or 'auto' only applicable to --stdio mode",
stdio__config_color, "always"),
+ OPT_STRING(0, "time", &report.time_str, "str",
+ "Time span of interest (start,stop)"),
OPT_END()
};
struct perf_data_file file = {
@@ -905,6 +918,9 @@ repeat:
if (itrace_synth_opts.last_branch)
has_br_stack = true;
+ if (has_br_stack && branch_call_mode)
+ symbol_conf.show_branchflag_count = true;
+
/*
* Branch mode is a tristate:
* -1 means default, so decide based on the file having branch data.
@@ -1006,6 +1022,11 @@ repeat:
if (symbol__init(&session->header.env) < 0)
goto error;
+ if (perf_time__parse_str(&report.ptime, report.time_str) != 0) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+
sort__setup_elide(stdout);
ret = __cmd_report(&report);
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index f5503ca22e1c..1a3f1be93372 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -13,12 +13,16 @@
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
+#include "util/stat.h"
+#include "util/callchain.h"
+#include "util/time-utils.h"
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/debug.h"
+#include <linux/log2.h>
#include <sys/prctl.h>
#include <sys/resource.h>
@@ -192,8 +196,45 @@ struct perf_sched {
bool force;
bool skip_merge;
struct perf_sched_map map;
+
+ /* options for timehist command */
+ bool summary;
+ bool summary_only;
+ bool show_callchain;
+ unsigned int max_stack;
+ bool show_cpu_visual;
+ bool show_wakeups;
+ bool show_migrations;
+ u64 skipped_samples;
+ const char *time_str;
+ struct perf_time_interval ptime;
+};
+
+/* per thread run time data */
+struct thread_runtime {
+ u64 last_time; /* time of previous sched in/out event */
+ u64 dt_run; /* run time */
+ u64 dt_wait; /* time between CPU access (off cpu) */
+ u64 dt_delay; /* time between wakeup and sched-in */
+ u64 ready_to_run; /* time of wakeup */
+
+ struct stats run_stats;
+ u64 total_run_time;
+
+ u64 migrations;
+};
+
+/* per event run time data */
+struct evsel_runtime {
+ u64 *last_time; /* time this event was last seen per cpu */
+ u32 ncpu; /* highest cpu slot allocated */
};
+/* track idle times per cpu */
+static struct thread **idle_threads;
+static int idle_max_cpu;
+static char idle_comm[] = "<idle>";
+
static u64 get_nsecs(void)
{
struct timespec ts;
@@ -1191,6 +1232,7 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
int i;
int ret;
u64 avg;
+ char max_lat_at[32];
if (!work_list->nb_atoms)
return;
@@ -1212,12 +1254,13 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
printf(" ");
avg = work_list->total_lat / work_list->nb_atoms;
+ timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));
- printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n",
+ printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
(double)work_list->total_runtime / NSEC_PER_MSEC,
work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
(double)work_list->max_lat / NSEC_PER_MSEC,
- (double)work_list->max_lat_at / NSEC_PER_SEC);
+ max_lat_at);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
@@ -1402,6 +1445,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
int cpus_nr;
bool new_cpu = false;
const char *color = PERF_COLOR_NORMAL;
+ char stimestamp[32];
BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
@@ -1479,7 +1523,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
cpu_color = COLOR_CPUS;
if (cpu != this_cpu)
- color_fprintf(stdout, cpu_color, " ");
+ color_fprintf(stdout, color, " ");
else
color_fprintf(stdout, cpu_color, "*");
@@ -1492,8 +1536,9 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
goto out;
- color_fprintf(stdout, color, " %12.6f secs ", (double)timestamp / NSEC_PER_SEC);
- if (new_shortname) {
+ timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
+ color_fprintf(stdout, color, " %12s secs ", stimestamp);
+ if (new_shortname || (verbose && sched_in->tid)) {
const char *pid_color = color;
if (thread__has_color(sched_in))
@@ -1650,6 +1695,988 @@ out_delete:
return rc;
}
+/*
+ * scheduling times are printed as msec.usec
+ */
+static inline void print_sched_time(unsigned long long nsecs, int width)
+{
+ unsigned long msecs;
+ unsigned long usecs;
+
+ msecs = nsecs / NSEC_PER_MSEC;
+ nsecs -= msecs * NSEC_PER_MSEC;
+ usecs = nsecs / NSEC_PER_USEC;
+ printf("%*lu.%03lu ", width, msecs, usecs);
+}
+
+/*
+ * returns runtime data for event, allocating memory for it the
+ * first time it is used.
+ */
+static struct evsel_runtime *perf_evsel__get_runtime(struct perf_evsel *evsel)
+{
+ struct evsel_runtime *r = evsel->priv;
+
+ if (r == NULL) {
+ r = zalloc(sizeof(struct evsel_runtime));
+ evsel->priv = r;
+ }
+
+ return r;
+}
+
+/*
+ * save last time event was seen per cpu
+ */
+static void perf_evsel__save_time(struct perf_evsel *evsel,
+ u64 timestamp, u32 cpu)
+{
+ struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
+
+ if (r == NULL)
+ return;
+
+ if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
+ int i, n = __roundup_pow_of_two(cpu+1);
+ void *p = r->last_time;
+
+ p = realloc(r->last_time, n * sizeof(u64));
+ if (!p)
+ return;
+
+ r->last_time = p;
+ for (i = r->ncpu; i < n; ++i)
+ r->last_time[i] = (u64) 0;
+
+ r->ncpu = n;
+ }
+
+ r->last_time[cpu] = timestamp;
+}
+
+/* returns last time this event was seen on the given cpu */
+static u64 perf_evsel__get_time(struct perf_evsel *evsel, u32 cpu)
+{
+ struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
+
+ if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
+ return 0;
+
+ return r->last_time[cpu];
+}
+
+static int comm_width = 20;
+
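+/* Format "comm[tid/pid]" into a static buffer, widening the comm column when needed. */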
+static char *timehist_get_commstr(struct thread *thread)
+{
+ static char str[32];
+ const char *comm = thread__comm_str(thread);
+ pid_t tid = thread->tid;
+ pid_t pid = thread->pid_;
+ int n;
+
+ if (pid == 0)
+ n = scnprintf(str, sizeof(str), "%s", comm);
+
+ else if (tid != pid)
+ n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
+
+ else
+ n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
+
+ if (n > comm_width)
+ comm_width = n;
+
+ return str;
+}
+
+static void timehist_header(struct perf_sched *sched)
+{
+ u32 ncpus = sched->max_cpu + 1;
+ u32 i, j;
+
+ printf("%15s %6s ", "time", "cpu");
+
+ if (sched->show_cpu_visual) {
+ printf(" ");
+ for (i = 0, j = 0; i < ncpus; ++i) {
+ printf("%x", j++);
+ if (j > 15)
+ j = 0;
+ }
+ printf(" ");
+ }
+
+ printf(" %-20s %9s %9s %9s",
+ "task name", "wait time", "sch delay", "run time");
+
+ printf("\n");
+
+ /*
+ * units row
+ */
+ printf("%15s %-6s ", "", "");
+
+ if (sched->show_cpu_visual)
+ printf(" %*s ", ncpus, "");
+
+ printf(" %-20s %9s %9s %9s\n", "[tid/pid]", "(msec)", "(msec)", "(msec)");
+
+ /*
+ * separator
+ */
+ printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
+
+ if (sched->show_cpu_visual)
+ printf(" %.*s ", ncpus, graph_dotted_line);
+
+ printf(" %.20s %.9s %.9s %.9s",
+ graph_dotted_line, graph_dotted_line, graph_dotted_line,
+ graph_dotted_line);
+
+ printf("\n");
+}
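/*
 * Rough shape of the three header rows printed above (widths approximate,
 * CPU-visual gap present only when that option is enabled):
 *
 *            time    cpu  task name             wait time  sch delay   run time
 *                         [tid/pid]                (msec)     (msec)     (msec)
 *  --------------- ------ --------------------  ---------  ---------  ---------
 */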
+
+static void timehist_print_sample(struct perf_sched *sched,
+ struct perf_sample *sample,
+ struct addr_location *al,
+ struct thread *thread,
+ u64 t)
+{
+ struct thread_runtime *tr = thread__priv(thread);
+ u32 max_cpus = sched->max_cpu + 1;
+ char tstr[64];
+
+ timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
+ printf("%15s [%04d] ", tstr, sample->cpu);
+
+ if (sched->show_cpu_visual) {
+ u32 i;
+ char c;
+
+ printf(" ");
+ for (i = 0; i < max_cpus; ++i) {
+ /* flag idle times with 'i'; others are sched events */
+ if (i == sample->cpu)
+ c = (thread->tid == 0) ? 'i' : 's';
+ else
+ c = ' ';
+ printf("%c", c);
+ }
+ printf(" ");
+ }
+
+ printf(" %-*s ", comm_width, timehist_get_commstr(thread));
+
+ print_sched_time(tr->dt_wait, 6);
+ print_sched_time(tr->dt_delay, 6);
+ print_sched_time(tr->dt_run, 6);
+
+ if (sched->show_wakeups)
+ printf(" %-*s", comm_width, "");
+
+ if (thread->tid == 0)
+ goto out;
+
+ if (sched->show_callchain)
+ printf(" ");
+
+ sample__fprintf_sym(sample, al, 0,
+ EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
+ EVSEL__PRINT_CALLCHAIN_ARROW |
+ EVSEL__PRINT_SKIP_IGNORED,
+ &callchain_cursor, stdout);
+
+out:
+ printf("\n");
+}
+
+/*
+ * Explanation of delta-time stats:
+ *
+ * t = time of current schedule out event
+ * tprev = time of previous sched out event
+ * also time of schedule-in event for current task
+ * last_time = time of last sched change event for current task
+ * (i.e., time the process was last scheduled out)
+ * ready_to_run = time of wakeup for current task
+ *
+ * -----|------------|------------|------------|------
+ * last ready tprev t
+ * time to run
+ *
+ * |-------- dt_wait --------|
+ * |- dt_delay -|-- dt_run --|
+ *
+ * dt_run = run time of current task
+ * dt_wait = time between last schedule out event for task and tprev
+ * represents time spent off the cpu
+ * dt_delay = time between wakeup and schedule-in of task
+ */
+
+static void timehist_update_runtime_stats(struct thread_runtime *r,
+ u64 t, u64 tprev)
+{
+ r->dt_delay = 0;
+ r->dt_wait = 0;
+ r->dt_run = 0;
+ if (tprev) {
+ r->dt_run = t - tprev;
+ if (r->ready_to_run) {
+ if (r->ready_to_run > tprev)
+ pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
+ else
+ r->dt_delay = tprev - r->ready_to_run;
+ }
+
+ if (r->last_time > tprev)
+ pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
+ else if (r->last_time)
+ r->dt_wait = tprev - r->last_time;
+ }
+
+ update_stats(&r->run_stats, r->dt_run);
+ r->total_run_time += r->dt_run;
+}
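/*
 * Worked example of the diagram above, using hypothetical timestamps
 * (in usec): last_time = 100, ready_to_run = 150, tprev = 180, t = 230.
 *   dt_run   = t - tprev            = 50
 *   dt_delay = tprev - ready_to_run = 30
 *   dt_wait  = tprev - last_time    = 80
 */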
+
+static bool is_idle_sample(struct perf_sched *sched,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ struct thread *thread;
+ struct callchain_cursor *cursor = &callchain_cursor;
+
+ /* pid 0 == swapper == idle task */
+ if (sample->pid == 0)
+ return true;
+
+ if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0) {
+ if (perf_evsel__intval(evsel, sample, "prev_pid") == 0)
+ return true;
+ }
+
+ /* want main thread for process - has maps */
+ thread = machine__findnew_thread(machine, sample->pid, sample->pid);
+ if (thread == NULL) {
+ pr_debug("Failed to get thread for pid %d.\n", sample->pid);
+ return false;
+ }
+
+ if (!symbol_conf.use_callchain || sample->callchain == NULL)
+ return false;
+
+ if (thread__resolve_callchain(thread, cursor, evsel, sample,
+ NULL, NULL, sched->max_stack + 2) != 0) {
+ if (verbose)
+ error("Failed to resolve callchain. Skipping\n");
+
+ return false;
+ }
+
+ callchain_cursor_commit(cursor);
+
+ while (true) {
+ struct callchain_cursor_node *node;
+ struct symbol *sym;
+
+ node = callchain_cursor_current(cursor);
+ if (node == NULL)
+ break;
+
+ sym = node->sym;
+ if (sym && sym->name) {
+ if (!strcmp(sym->name, "schedule") ||
+ !strcmp(sym->name, "__schedule") ||
+ !strcmp(sym->name, "preempt_schedule"))
+ sym->ignore = 1;
+ }
+
+ callchain_cursor_advance(cursor);
+ }
+
+ return false;
+}
+
+/*
+ * Track idle stats per cpu by maintaining a local thread
+ * struct for the idle task on each cpu.
+ */
+static int init_idle_threads(int ncpu)
+{
+ int i;
+
+ idle_threads = zalloc(ncpu * sizeof(struct thread *));
+ if (!idle_threads)
+ return -ENOMEM;
+
+ idle_max_cpu = ncpu;
+
+ /* allocate the actual thread struct if needed */
+ for (i = 0; i < ncpu; ++i) {
+ idle_threads[i] = thread__new(0, 0);
+ if (idle_threads[i] == NULL)
+ return -ENOMEM;
+
+ thread__set_comm(idle_threads[i], idle_comm, 0);
+ }
+
+ return 0;
+}
+
+static void free_idle_threads(void)
+{
+ int i;
+
+ if (idle_threads == NULL)
+ return;
+
+ for (i = 0; i < idle_max_cpu; ++i) {
+ if ((idle_threads[i]))
+ thread__delete(idle_threads[i]);
+ }
+
+ free(idle_threads);
+}
+
+static struct thread *get_idle_thread(int cpu)
+{
+ /*
+ * expand/allocate array of pointers to local thread
+ * structs if needed
+ */
+ if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
+ int i, j = __roundup_pow_of_two(cpu+1);
+ void *p;
+
+ p = realloc(idle_threads, j * sizeof(struct thread *));
+ if (!p)
+ return NULL;
+
+ idle_threads = (struct thread **) p;
+ for (i = idle_max_cpu; i < j; ++i)
+ idle_threads[i] = NULL;
+
+ idle_max_cpu = j;
+ }
+
+ /* allocate a new thread struct if needed */
+ if (idle_threads[cpu] == NULL) {
+ idle_threads[cpu] = thread__new(0, 0);
+
+ if (idle_threads[cpu]) {
+ idle_threads[cpu]->tid = 0;
+ thread__set_comm(idle_threads[cpu], idle_comm, 0);
+ }
+ }
+
+ return idle_threads[cpu];
+}
+
+/*
+ * handle runtime stats saved per thread
+ */
+static struct thread_runtime *thread__init_runtime(struct thread *thread)
+{
+ struct thread_runtime *r;
+
+ r = zalloc(sizeof(struct thread_runtime));
+ if (!r)
+ return NULL;
+
+ init_stats(&r->run_stats);
+ thread__set_priv(thread, r);
+
+ return r;
+}
+
+static struct thread_runtime *thread__get_runtime(struct thread *thread)
+{
+ struct thread_runtime *tr;
+
+ tr = thread__priv(thread);
+ if (tr == NULL) {
+ tr = thread__init_runtime(thread);
+ if (tr == NULL)
+ pr_debug("Failed to malloc memory for runtime data.\n");
+ }
+
+ return tr;
+}
+
+static struct thread *timehist_get_thread(struct perf_sched *sched,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct perf_evsel *evsel)
+{
+ struct thread *thread;
+
+ if (is_idle_sample(sched, sample, evsel, machine)) {
+ thread = get_idle_thread(sample->cpu);
+ if (thread == NULL)
+ pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
+
+ } else {
+ /* there were samples with tid 0 but non-zero pid */
+ thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid ?: sample->pid);
+ if (thread == NULL) {
+ pr_debug("Failed to get thread for tid %d. skipping sample.\n",
+ sample->tid);
+ }
+ }
+
+ return thread;
+}
+
+static bool timehist_skip_sample(struct perf_sched *sched,
+ struct thread *thread)
+{
+ bool rc = false;
+
+ if (thread__is_filtered(thread)) {
+ rc = true;
+ sched->skipped_samples++;
+ }
+
+ return rc;
+}
+
+static void timehist_print_wakeup_event(struct perf_sched *sched,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *awakened)
+{
+ struct thread *thread;
+ char tstr[64];
+
+ thread = machine__findnew_thread(machine, sample->pid, sample->tid);
+ if (thread == NULL)
+ return;
+
+ /* show wakeup unless both awakee and awaker are filtered */
+ if (timehist_skip_sample(sched, thread) &&
+ timehist_skip_sample(sched, awakened)) {
+ return;
+ }
+
+ timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
+ printf("%15s [%04d] ", tstr, sample->cpu);
+ if (sched->show_cpu_visual)
+ printf(" %*s ", sched->max_cpu + 1, "");
+
+ printf(" %-*s ", comm_width, timehist_get_commstr(thread));
+
+ /* dt spacer */
+ printf(" %9s %9s %9s ", "", "", "");
+
+ printf("awakened: %s", timehist_get_commstr(awakened));
+
+ printf("\n");
+}
+
+static int timehist_sched_wakeup_event(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+ struct thread *thread;
+ struct thread_runtime *tr = NULL;
+ /* want pid of awakened task not pid in sample */
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+
+ thread = machine__findnew_thread(machine, 0, pid);
+ if (thread == NULL)
+ return -1;
+
+ tr = thread__get_runtime(thread);
+ if (tr == NULL)
+ return -1;
+
+ if (tr->ready_to_run == 0)
+ tr->ready_to_run = sample->time;
+
+ /* show wakeups if requested */
+ if (sched->show_wakeups &&
+ !perf_time__skip_sample(&sched->ptime, sample->time))
+ timehist_print_wakeup_event(sched, sample, machine, thread);
+
+ return 0;
+}
+
+static void timehist_print_migration_event(struct perf_sched *sched,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct thread *migrated)
+{
+ struct thread *thread;
+ char tstr[64];
+ u32 max_cpus = sched->max_cpu + 1;
+ u32 ocpu, dcpu;
+
+ if (sched->summary_only)
+ return;
+
+ max_cpus = sched->max_cpu + 1;
+ ocpu = perf_evsel__intval(evsel, sample, "orig_cpu");
+ dcpu = perf_evsel__intval(evsel, sample, "dest_cpu");
+
+ thread = machine__findnew_thread(machine, sample->pid, sample->tid);
+ if (thread == NULL)
+ return;
+
+ if (timehist_skip_sample(sched, thread) &&
+ timehist_skip_sample(sched, migrated)) {
+ return;
+ }
+
+ timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
+ printf("%15s [%04d] ", tstr, sample->cpu);
+
+ if (sched->show_cpu_visual) {
+ u32 i;
+ char c;
+
+ printf(" ");
+ for (i = 0; i < max_cpus; ++i) {
+ c = (i == sample->cpu) ? 'm' : ' ';
+ printf("%c", c);
+ }
+ printf(" ");
+ }
+
+ printf(" %-*s ", comm_width, timehist_get_commstr(thread));
+
+ /* dt spacer */
+ printf(" %9s %9s %9s ", "", "", "");
+
+ printf("migrated: %s", timehist_get_commstr(migrated));
+ printf(" cpu %d => %d", ocpu, dcpu);
+
+ printf("\n");
+}
+
+static int timehist_migrate_task_event(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+ struct thread *thread;
+ struct thread_runtime *tr = NULL;
+ /* want pid of migrated task not pid in sample */
+ const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+
+ thread = machine__findnew_thread(machine, 0, pid);
+ if (thread == NULL)
+ return -1;
+
+ tr = thread__get_runtime(thread);
+ if (tr == NULL)
+ return -1;
+
+ tr->migrations++;
+
+ /* show migrations if requested */
+ timehist_print_migration_event(sched, evsel, sample, machine, thread);
+
+ return 0;
+}
+
+static int timehist_sched_change_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+ struct perf_time_interval *ptime = &sched->ptime;
+ struct addr_location al;
+ struct thread *thread;
+ struct thread_runtime *tr = NULL;
+ u64 tprev, t = sample->time;
+ int rc = 0;
+
+ if (machine__resolve(machine, &al, sample) < 0) {
+ pr_err("problem processing %d event. skipping it\n",
+ event->header.type);
+ rc = -1;
+ goto out;
+ }
+
+ thread = timehist_get_thread(sched, sample, machine, evsel);
+ if (thread == NULL) {
+ rc = -1;
+ goto out;
+ }
+
+ if (timehist_skip_sample(sched, thread))
+ goto out;
+
+ tr = thread__get_runtime(thread);
+ if (tr == NULL) {
+ rc = -1;
+ goto out;
+ }
+
+ tprev = perf_evsel__get_time(evsel, sample->cpu);
+
+ /*
+ * If start time given:
+ * - sample time is under window user cares about - skip sample
+ * - tprev is under window user cares about - reset to start of window
+ */
+ if (ptime->start && ptime->start > t)
+ goto out;
+
+ if (ptime->start > tprev)
+ tprev = ptime->start;
+
+ /*
+ * If end time given:
+ * - previous sched event is out of window - we are done
+ * - sample time is beyond window user cares about - reset it
+ * to close out stats for time window interest
+ */
+ if (ptime->end) {
+ if (tprev > ptime->end)
+ goto out;
+
+ if (t > ptime->end)
+ t = ptime->end;
+ }
+
+ timehist_update_runtime_stats(tr, t, tprev);
+
+ if (!sched->summary_only)
+ timehist_print_sample(sched, sample, &al, thread, t);
+
+out:
+ if (tr) {
+ /* time of this sched_switch event becomes last time task seen */
+ tr->last_time = sample->time;
+
+ /* sched out event for task so reset ready to run time */
+ tr->ready_to_run = 0;
+ }
+
+ perf_evsel__save_time(evsel, sample->time, sample->cpu);
+
+ return rc;
+}
+
+static int timehist_sched_switch_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ return timehist_sched_change_event(tool, event, evsel, sample, machine);
+}
+
+static int process_lost(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ char tstr[64];
+
+ timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
+ printf("%15s ", tstr);
+ printf("lost %" PRIu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
+
+ return 0;
+}
+
+
+static void print_thread_runtime(struct thread *t,
+ struct thread_runtime *r)
+{
+ double mean = avg_stats(&r->run_stats);
+ float stddev;
+
+ printf("%*s %5d %9" PRIu64 " ",
+ comm_width, timehist_get_commstr(t), t->ppid,
+ (u64) r->run_stats.n);
+
+ print_sched_time(r->total_run_time, 8);
+ stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
+ print_sched_time(r->run_stats.min, 6);
+ printf(" ");
+ print_sched_time((u64) mean, 6);
+ printf(" ");
+ print_sched_time(r->run_stats.max, 6);
+ printf(" ");
+ printf("%5.2f", stddev);
+ printf(" %5" PRIu64, r->migrations);
+ printf("\n");
+}
+
+struct total_run_stats {
+ u64 sched_count;
+ u64 task_count;
+ u64 total_run_time;
+};
+
+static int __show_thread_runtime(struct thread *t, void *priv)
+{
+ struct total_run_stats *stats = priv;
+ struct thread_runtime *r;
+
+ if (thread__is_filtered(t))
+ return 0;
+
+ r = thread__priv(t);
+ if (r && r->run_stats.n) {
+ stats->task_count++;
+ stats->sched_count += r->run_stats.n;
+ stats->total_run_time += r->total_run_time;
+ print_thread_runtime(t, r);
+ }
+
+ return 0;
+}
+
+static int show_thread_runtime(struct thread *t, void *priv)
+{
+ if (t->dead)
+ return 0;
+
+ return __show_thread_runtime(t, priv);
+}
+
+static int show_deadthread_runtime(struct thread *t, void *priv)
+{
+ if (!t->dead)
+ return 0;
+
+ return __show_thread_runtime(t, priv);
+}
+
+static void timehist_print_summary(struct perf_sched *sched,
+ struct perf_session *session)
+{
+ struct machine *m = &session->machines.host;
+ struct total_run_stats totals;
+ u64 task_count;
+ struct thread *t;
+ struct thread_runtime *r;
+ int i;
+
+ memset(&totals, 0, sizeof(totals));
+
+ if (comm_width < 30)
+ comm_width = 30;
+
+ printf("\nRuntime summary\n");
+ printf("%*s parent sched-in ", comm_width, "comm");
+ printf(" run-time min-run avg-run max-run stddev migrations\n");
+ printf("%*s (count) ", comm_width, "");
+ printf(" (msec) (msec) (msec) (msec) %%\n");
+ printf("%.117s\n", graph_dotted_line);
+
+ machine__for_each_thread(m, show_thread_runtime, &totals);
+ task_count = totals.task_count;
+ if (!task_count)
+ printf("<no still running tasks>\n");
+
+ printf("\nTerminated tasks:\n");
+ machine__for_each_thread(m, show_deadthread_runtime, &totals);
+ if (task_count == totals.task_count)
+ printf("<no terminated tasks>\n");
+
+ /* CPU idle stats not tracked when samples were skipped */
+ if (sched->skipped_samples)
+ return;
+
+ printf("\nIdle stats:\n");
+ for (i = 0; i < idle_max_cpu; ++i) {
+ t = idle_threads[i];
+ if (!t)
+ continue;
+
+ r = thread__priv(t);
+ if (r && r->run_stats.n) {
+ totals.sched_count += r->run_stats.n;
+ printf(" CPU %2d idle for ", i);
+ print_sched_time(r->total_run_time, 6);
+ printf(" msec\n");
+ } else
+ printf(" CPU %2d idle entire time window\n", i);
+ }
+
+ printf("\n"
+ " Total number of unique tasks: %" PRIu64 "\n"
+ "Total number of context switches: %" PRIu64 "\n"
+ " Total run time (msec): ",
+ totals.task_count, totals.sched_count);
+
+ print_sched_time(totals.total_run_time, 2);
+ printf("\n");
+}
+
+typedef int (*sched_handler)(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine);
+
+static int perf_timehist__process_sample(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+ int err = 0;
+ int this_cpu = sample->cpu;
+
+ if (this_cpu > sched->max_cpu)
+ sched->max_cpu = this_cpu;
+
+ if (evsel->handler != NULL) {
+ sched_handler f = evsel->handler;
+
+ err = f(tool, event, evsel, sample, machine);
+ }
+
+ return err;
+}
+
+static int timehist_check_attr(struct perf_sched *sched,
+ struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ struct evsel_runtime *er;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ er = perf_evsel__get_runtime(evsel);
+ if (er == NULL) {
+ pr_err("Failed to allocate memory for evsel runtime data\n");
+ return -1;
+ }
+
+ if (sched->show_callchain &&
+ !(evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ pr_info("Samples do not have callchains.\n");
+ sched->show_callchain = 0;
+ symbol_conf.use_callchain = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int perf_sched__timehist(struct perf_sched *sched)
+{
+ const struct perf_evsel_str_handler handlers[] = {
+ { "sched:sched_switch", timehist_sched_switch_event, },
+ { "sched:sched_wakeup", timehist_sched_wakeup_event, },
+ { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
+ };
+ const struct perf_evsel_str_handler migrate_handlers[] = {
+ { "sched:sched_migrate_task", timehist_migrate_task_event, },
+ };
+ struct perf_data_file file = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = sched->force,
+ };
+
+ struct perf_session *session;
+ struct perf_evlist *evlist;
+ int err = -1;
+
+ /*
+ * event handlers for timehist option
+ */
+ sched->tool.sample = perf_timehist__process_sample;
+ sched->tool.mmap = perf_event__process_mmap;
+ sched->tool.comm = perf_event__process_comm;
+ sched->tool.exit = perf_event__process_exit;
+ sched->tool.fork = perf_event__process_fork;
+ sched->tool.lost = process_lost;
+ sched->tool.attr = perf_event__process_attr;
+ sched->tool.tracing_data = perf_event__process_tracing_data;
+ sched->tool.build_id = perf_event__process_build_id;
+
+ sched->tool.ordered_events = true;
+ sched->tool.ordering_requires_timestamps = true;
+
+ symbol_conf.use_callchain = sched->show_callchain;
+
+ session = perf_session__new(&file, false, &sched->tool);
+ if (session == NULL)
+ return -ENOMEM;
+
+ evlist = session->evlist;
+
+ symbol__init(&session->header.env);
+
+ if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+
+ if (timehist_check_attr(sched, evlist) != 0)
+ goto out;
+
+ setup_pager();
+
+ /* setup per-evsel handlers */
+ if (perf_session__set_tracepoints_handlers(session, handlers))
+ goto out;
+
+ /* sched_switch event at a minimum needs to exist */
+ if (!perf_evlist__find_tracepoint_by_name(session->evlist,
+ "sched:sched_switch")) {
+ pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
+ goto out;
+ }
+
+ if (sched->show_migrations &&
+ perf_session__set_tracepoints_handlers(session, migrate_handlers))
+ goto out;
+
+ /* pre-allocate struct for per-CPU idle stats */
+ sched->max_cpu = session->header.env.nr_cpus_online;
+ if (sched->max_cpu == 0)
+ sched->max_cpu = 4;
+ if (init_idle_threads(sched->max_cpu))
+ goto out;
+
+ /* summary_only implies summary option, but don't overwrite summary if set */
+ if (sched->summary_only)
+ sched->summary = sched->summary_only;
+
+ if (!sched->summary_only)
+ timehist_header(sched);
+
+ err = perf_session__process_events(session);
+ if (err) {
+ pr_err("Failed to process events, error %d", err);
+ goto out;
+ }
+
+ sched->nr_events = evlist->stats.nr_events[0];
+ sched->nr_lost_events = evlist->stats.total_lost;
+ sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
+
+ if (sched->summary)
+ timehist_print_summary(sched, session);
+
+out:
+ free_idle_threads();
+ perf_session__delete(session);
+
+ return err;
+}
+
+
static void print_bad_events(struct perf_sched *sched)
{
if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
@@ -1953,38 +2980,32 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
.next_shortname1 = 'A',
.next_shortname2 = '0',
.skip_merge = 0,
+ .show_callchain = 1,
+ .max_stack = 5,
+ };
+ const struct option sched_options[] = {
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
+ OPT_END()
};
const struct option latency_options[] = {
OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
"sort by key(s): runtime, switch, avg, max"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
OPT_INTEGER('C', "CPU", &sched.profile_cpu,
"CPU to profile on"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
OPT_BOOLEAN('p', "pids", &sched.skip_merge,
"latency stats per pid instead of per comm"),
- OPT_END()
+ OPT_PARENT(sched_options)
};
const struct option replay_options[] = {
OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
"repeat the workload replay N times (-1: infinite)"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
- OPT_END()
- };
- const struct option sched_options[] = {
- OPT_STRING('i', "input", &input_name, "file",
- "input file name"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_END()
+ OPT_PARENT(sched_options)
};
const struct option map_options[] = {
OPT_BOOLEAN(0, "compact", &sched.map.comp,
@@ -1995,8 +3016,31 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
"highlight given CPUs in map"),
OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
"display given CPUs in map"),
- OPT_END()
+ OPT_PARENT(sched_options)
};
+ const struct option timehist_options[] = {
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+ "file", "kallsyms pathname"),
+ OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
+ "Display call chains if present (default on)"),
+ OPT_UINTEGER(0, "max-stack", &sched.max_stack,
+ "Maximum number of functions to display backtrace."),
+ OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
+ "Look for files with symbols relative to this directory"),
+ OPT_BOOLEAN('s', "summary", &sched.summary_only,
+ "Show only syscall summary with statistics"),
+ OPT_BOOLEAN('S', "with-summary", &sched.summary,
+ "Show all syscalls and summary with statistics"),
+ OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
+ OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
+ OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
+ OPT_STRING(0, "time", &sched.time_str, "str",
+ "Time span for analysis (start,stop)"),
+ OPT_PARENT(sched_options)
+ };
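/*
 * Illustrative invocations of the new subcommand, using only the options
 * declared above (input file and time window are hypothetical):
 *
 *   perf sched timehist -i perf.data
 *   perf sched timehist -s
 *   perf sched timehist -w -V --max-stack 3
 *   perf sched timehist -S --time 1234.567,1234.987
 */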
+
const char * const latency_usage[] = {
"perf sched latency [<options>]",
NULL
@@ -2009,8 +3053,13 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
"perf sched map [<options>]",
NULL
};
+ const char * const timehist_usage[] = {
+ "perf sched timehist [<options>]",
+ NULL
+ };
const char *const sched_subcommands[] = { "record", "latency", "map",
- "replay", "script", NULL };
+ "replay", "script",
+ "timehist", NULL };
const char *sched_usage[] = {
NULL,
NULL
@@ -2073,6 +3122,21 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
usage_with_options(replay_usage, replay_options);
}
return perf_sched__replay(&sched);
+ } else if (!strcmp(argv[0], "timehist")) {
+ if (argc) {
+ argc = parse_options(argc, argv, timehist_options,
+ timehist_usage, 0);
+ if (argc)
+ usage_with_options(timehist_usage, timehist_options);
+ }
+ if (sched.show_wakeups && sched.summary_only) {
+ pr_err(" Error: -s and -w are mutually exclusive.\n");
+ parse_options_usage(timehist_usage, timehist_options, "s", true);
+ parse_options_usage(NULL, timehist_options, "w", true);
+ return -EINVAL;
+ }
+
+ return perf_sched__timehist(&sched);
} else {
usage_with_options(sched_usage, sched_options);
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 7228d141a789..2f3ff69fc4e7 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -22,6 +22,7 @@
#include "util/thread_map.h"
#include "util/stat.h"
#include "util/thread-stack.h"
+#include "util/time-utils.h"
#include <linux/bitmap.h>
#include <linux/stringify.h>
#include <linux/time64.h>
@@ -66,6 +67,8 @@ enum perf_output_field {
PERF_OUTPUT_WEIGHT = 1U << 18,
PERF_OUTPUT_BPF_OUTPUT = 1U << 19,
PERF_OUTPUT_CALLINDENT = 1U << 20,
+ PERF_OUTPUT_INSN = 1U << 21,
+ PERF_OUTPUT_INSNLEN = 1U << 22,
};
struct output_option {
@@ -93,6 +96,8 @@ struct output_option {
{.str = "weight", .field = PERF_OUTPUT_WEIGHT},
{.str = "bpf-output", .field = PERF_OUTPUT_BPF_OUTPUT},
{.str = "callindent", .field = PERF_OUTPUT_CALLINDENT},
+ {.str = "insn", .field = PERF_OUTPUT_INSN},
+ {.str = "insnlen", .field = PERF_OUTPUT_INSNLEN},
};
/* default set to maintain compatibility with current format */
@@ -437,7 +442,6 @@ static void print_sample_start(struct perf_sample *sample,
{
struct perf_event_attr *attr = &evsel->attr;
unsigned long secs;
- unsigned long usecs;
unsigned long long nsecs;
if (PRINT_FIELD(COMM)) {
@@ -467,11 +471,14 @@ static void print_sample_start(struct perf_sample *sample,
nsecs = sample->time;
secs = nsecs / NSEC_PER_SEC;
nsecs -= secs * NSEC_PER_SEC;
- usecs = nsecs / NSEC_PER_USEC;
+
if (nanosecs)
printf("%5lu.%09llu: ", secs, nsecs);
- else
- printf("%5lu.%06lu: ", secs, usecs);
+ else {
+ char sample_time[32];
+ timestamp__scnprintf_usec(sample->time, sample_time, sizeof(sample_time));
+ printf("%12s: ", sample_time);
+ }
}
}
@@ -624,6 +631,20 @@ static void print_sample_callindent(struct perf_sample *sample,
printf("%*s", spacing - len, "");
}
+static void print_insn(struct perf_sample *sample,
+ struct perf_event_attr *attr)
+{
+ if (PRINT_FIELD(INSNLEN))
+ printf(" ilen: %d", sample->insn_len);
+ if (PRINT_FIELD(INSN)) {
+ int i;
+
+ printf(" insn:");
+ for (i = 0; i < sample->insn_len; i++)
+ printf(" %02x", (unsigned char)sample->insn[i]);
+ }
+}
+
static void print_sample_bts(struct perf_sample *sample,
struct perf_evsel *evsel,
struct thread *thread,
@@ -668,6 +689,8 @@ static void print_sample_bts(struct perf_sample *sample,
if (print_srcline_last)
map__fprintf_srcline(al->map, al->addr, "\n ", stdout);
+ print_insn(sample, attr);
+
printf("\n");
}
@@ -811,6 +834,8 @@ struct perf_script {
struct cpu_map *cpus;
struct thread_map *threads;
int name_width;
+ const char *time_str;
+ struct perf_time_interval ptime;
};
static int perf_evlist__max_name_len(struct perf_evlist *evlist)
@@ -911,7 +936,7 @@ static void process_event(struct perf_script *script,
if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
print_sample_bpf_output(sample);
-
+ print_insn(sample, attr);
printf("\n");
}
@@ -992,6 +1017,9 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct addr_location al;
+ if (perf_time__skip_sample(&scr->ptime, sample->time))
+ return 0;
+
if (debug_mode) {
if (sample->time < last_timestamp) {
pr_err("Samples misordered, previous: %" PRIu64
@@ -2124,11 +2152,13 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"Valid types: hw,sw,trace,raw. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
"addr,symoff,period,iregs,brstack,brstacksym,flags,"
- "bpf-output,callindent", parse_output_fields),
+ "bpf-output,callindent,insn,insnlen", parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
+ OPT_STRING(0, "stop-bt", &symbol_conf.bt_stop_list_str, "symbol[,symbol...]",
+ "Stop display of callgraph at these symbols"),
OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
"only display events for these comms"),
@@ -2162,7 +2192,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"Enable symbol demangling"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
-
+ OPT_STRING(0, "time", &script.time_str, "str",
+ "Time span of interest (start,stop)"),
OPT_END()
};
const char * const script_subcommands[] = { "record", "report", NULL };
@@ -2441,6 +2472,12 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
if (err < 0)
goto out_delete;
+ /* needs to be parsed after looking up reference time */
+ if (perf_time__parse_str(&script.ptime, script.time_str) != 0) {
+ pr_err("Invalid time string\n");
+ return -EINVAL;
+ }
+
err = __cmd_script(&script);
flush_scripting();
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index fe3af9535e85..3df4178ba378 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -130,7 +130,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__disassemble(sym, map, 0);
+ err = symbol__disassemble(sym, map, NULL, 0);
if (err == 0) {
out_assign:
top->sym_filter_entry = he;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index c298bd3e1d90..206bf72b77fc 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -74,8 +74,6 @@ struct trace {
size_t nr;
int *entries;
} ev_qualifier_ids;
- struct intlist *tid_list;
- struct intlist *pid_list;
struct {
size_t nr;
pid_t *entries;
@@ -843,7 +841,6 @@ static size_t fprintf_duration(unsigned long t, FILE *fp)
*/
struct thread_trace {
u64 entry_time;
- u64 exit_time;
bool entry_pending;
unsigned long nr_events;
unsigned long pfmaj, pfmin;
@@ -1452,7 +1449,7 @@ static int trace__printf_interrupted_entry(struct trace *trace, struct perf_samp
duration = sample->time - ttrace->entry_time;
- printed = trace__fprintf_entry_head(trace, trace->current, duration, sample->time, trace->output);
+ printed = trace__fprintf_entry_head(trace, trace->current, duration, ttrace->entry_time, trace->output);
printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
ttrace->entry_pending = false;
@@ -1499,7 +1496,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (sc->is_exit) {
if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
- trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
+ trace__fprintf_entry_head(trace, thread, 1, ttrace->entry_time, trace->output);
fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
}
} else {
@@ -1571,8 +1568,6 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
++trace->stats.vfs_getname;
}
- ttrace->exit_time = sample->time;
-
if (ttrace->entry_time) {
duration = sample->time - ttrace->entry_time;
if (trace__filter_duration(trace, duration))
@@ -1592,7 +1587,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
if (trace->summary_only)
goto out;
- trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
+ trace__fprintf_entry_head(trace, thread, duration, ttrace->entry_time, trace->output);
if (ttrace->entry_pending) {
fprintf(trace->output, "%-70s", ttrace->entry_str);
@@ -1893,18 +1888,6 @@ out_put:
return err;
}
-static bool skip_sample(struct trace *trace, struct perf_sample *sample)
-{
- if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
- (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
- return false;
-
- if (trace->pid_list || trace->tid_list)
- return true;
-
- return false;
-}
-
static void trace__set_base_time(struct trace *trace,
struct perf_evsel *evsel,
struct perf_sample *sample)
@@ -1929,11 +1912,13 @@ static int trace__process_sample(struct perf_tool *tool,
struct machine *machine __maybe_unused)
{
struct trace *trace = container_of(tool, struct trace, tool);
+ struct thread *thread;
int err = 0;
tracepoint_handler handler = evsel->handler;
- if (skip_sample(trace, sample))
+ thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+ if (thread && thread__is_filtered(thread))
return 0;
trace__set_base_time(trace, evsel, sample);
@@ -1946,27 +1931,6 @@ static int trace__process_sample(struct perf_tool *tool,
return err;
}
-static int parse_target_str(struct trace *trace)
-{
- if (trace->opts.target.pid) {
- trace->pid_list = intlist__new(trace->opts.target.pid);
- if (trace->pid_list == NULL) {
- pr_err("Error parsing process id string\n");
- return -EINVAL;
- }
- }
-
- if (trace->opts.target.tid) {
- trace->tid_list = intlist__new(trace->opts.target.tid);
- if (trace->tid_list == NULL) {
- pr_err("Error parsing thread id string\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static int trace__record(struct trace *trace, int argc, const char **argv)
{
unsigned int rec_argc, i, j;
@@ -2310,12 +2274,17 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_mmap;
- if (!target__none(&trace->opts.target))
+ if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
perf_evlist__enable(evlist);
if (forks)
perf_evlist__start_workload(evlist);
+ if (trace->opts.initial_delay) {
+ usleep(trace->opts.initial_delay * 1000);
+ perf_evlist__enable(evlist);
+ }
+
trace->multiple_threads = thread_map__pid(evlist->threads, 0) == -1 ||
evlist->threads->nr > 1 ||
perf_evlist__first(evlist)->attr.inherit;
@@ -2458,6 +2427,12 @@ static int trace__replay(struct trace *trace)
if (session == NULL)
return -1;
+ if (trace->opts.target.pid)
+ symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
+
+ if (trace->opts.target.tid)
+ symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
+
if (symbol__init(&session->header.env) < 0)
goto out;
@@ -2501,10 +2476,6 @@ static int trace__replay(struct trace *trace)
evsel->handler = trace__pgfault;
}
- err = parse_target_str(trace);
- if (err != 0)
- goto out;
-
setup_pager();
err = perf_session__process_events(session);
@@ -2816,6 +2787,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
+ OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
+ "ms to wait before starting measurement after program "
+ "start"),
OPT_END()
};
bool __maybe_unused max_stack_user_set = true;
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 41c24010ab43..0bcf68e98ccc 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -18,6 +18,7 @@ int cmd_bench(int argc, const char **argv, const char *prefix);
int cmd_buildid_cache(int argc, const char **argv, const char *prefix);
int cmd_buildid_list(int argc, const char **argv, const char *prefix);
int cmd_config(int argc, const char **argv, const char *prefix);
+int cmd_c2c(int argc, const char **argv, const char *prefix);
int cmd_diff(int argc, const char **argv, const char *prefix);
int cmd_evlist(int argc, const char **argv, const char *prefix);
int cmd_help(int argc, const char **argv, const char *prefix);
diff --git a/tools/perf/jvmti/Build b/tools/perf/jvmti/Build
new file mode 100644
index 000000000000..eaeb8cb5379b
--- /dev/null
+++ b/tools/perf/jvmti/Build
@@ -0,0 +1,8 @@
+jvmti-y += libjvmti.o
+jvmti-y += jvmti_agent.o
+
+CFLAGS_jvmti = -fPIC -DPIC -I$(JDIR)/include -I$(JDIR)/include/linux
+CFLAGS_REMOVE_jvmti = -Wmissing-declarations
+CFLAGS_REMOVE_jvmti += -Wstrict-prototypes
+CFLAGS_REMOVE_jvmti += -Wextra
+CFLAGS_REMOVE_jvmti += -Wwrite-strings
diff --git a/tools/perf/jvmti/Makefile b/tools/perf/jvmti/Makefile
deleted file mode 100644
index df14e6b67b63..000000000000
--- a/tools/perf/jvmti/Makefile
+++ /dev/null
@@ -1,89 +0,0 @@
-ARCH=$(shell uname -m)
-
-ifeq ($(ARCH), x86_64)
-JARCH=amd64
-endif
-ifeq ($(ARCH), armv7l)
-JARCH=armhf
-endif
-ifeq ($(ARCH), armv6l)
-JARCH=armhf
-endif
-ifeq ($(ARCH), aarch64)
-JARCH=aarch64
-endif
-ifeq ($(ARCH), ppc64)
-JARCH=powerpc
-endif
-ifeq ($(ARCH), ppc64le)
-JARCH=powerpc
-endif
-
-DESTDIR=/usr/local
-
-VERSION=1
-REVISION=0
-AGE=0
-
-LN=ln -sf
-RM=rm
-
-SLIBJVMTI=libjvmti.so.$(VERSION).$(REVISION).$(AGE)
-VLIBJVMTI=libjvmti.so.$(VERSION)
-SLDFLAGS=-shared -Wl,-soname -Wl,$(VLIBJVMTI)
-SOLIBEXT=so
-
-# The following works at least on fedora 23, you may need the next
-# line for other distros.
-ifneq (,$(wildcard /usr/sbin/update-java-alternatives))
-JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
-else
- ifneq (,$(wildcard /usr/sbin/alternatives))
- JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
- endif
-endif
-ifndef JDIR
-$(error Could not find alternatives command, you need to set JDIR= to point to the root of your Java directory)
-else
- ifeq (,$(wildcard $(JDIR)/include/jvmti.h))
- $(error the openjdk development package appears to me missing, install and try again)
- endif
-endif
-$(info Using Java from $(JDIR))
-# -lrt required in 32-bit mode for clock_gettime()
-LIBS=-lelf -lrt
-INCDIR=-I $(JDIR)/include -I $(JDIR)/include/linux
-
-TARGETS=$(SLIBJVMTI)
-
-SRCS=libjvmti.c jvmti_agent.c
-OBJS=$(SRCS:.c=.o)
-SOBJS=$(OBJS:.o=.lo)
-OPT=-O2 -g -Werror -Wall
-
-CFLAGS=$(INCDIR) $(OPT)
-
-all: $(TARGETS)
-
-.c.o:
- $(CC) $(CFLAGS) -c $*.c
-.c.lo:
- $(CC) -fPIC -DPIC $(CFLAGS) -c $*.c -o $*.lo
-
-$(OBJS) $(SOBJS): Makefile jvmti_agent.h ../util/jitdump.h
-
-$(SLIBJVMTI): $(SOBJS)
- $(CC) $(CFLAGS) $(SLDFLAGS) -o $@ $(SOBJS) $(LIBS)
- $(LN) $@ libjvmti.$(SOLIBEXT)
-
-clean:
- $(RM) -f *.o *.so.* *.so *.lo
-
-install:
- -mkdir -p $(DESTDIR)/lib
- install -m 755 $(SLIBJVMTI) $(DESTDIR)/lib/
- (cd $(DESTDIR)/lib; $(LN) $(SLIBJVMTI) $(VLIBJVMTI))
- (cd $(DESTDIR)/lib; $(LN) $(SLIBJVMTI) libjvmti.$(SOLIBEXT))
- ldconfig
-
-.SUFFIXES: .c .S .o .lo
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index 55daefff0d54..e9651a9d670e 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -44,11 +44,6 @@
static char jit_path[PATH_MAX];
static void *marker_addr;
-/*
- * padding buffer
- */
-static const char pad_bytes[7];
-
static inline pid_t gettid(void)
{
return (pid_t)syscall(__NR_gettid);
@@ -230,7 +225,6 @@ init_arch_timestamp(void)
void *jvmti_open(void)
{
- int pad_cnt;
char dump_path[PATH_MAX];
struct jitheader header;
int fd;
@@ -288,10 +282,6 @@ void *jvmti_open(void)
header.total_size = sizeof(header);
header.pid = getpid();
- /* calculate amount of padding '\0' */
- pad_cnt = PADDING_8ALIGNED(header.total_size);
- header.total_size += pad_cnt;
-
header.timestamp = perf_get_timestamp();
if (use_arch_timestamp)
@@ -301,13 +291,6 @@ void *jvmti_open(void)
warn("jvmti: cannot write dumpfile header");
goto error;
}
-
- /* write padding '\0' if necessary */
- if (pad_cnt && !fwrite(pad_bytes, pad_cnt, 1, fp)) {
- warn("jvmti: cannot write dumpfile header padding");
- goto error;
- }
-
return fp;
error:
fclose(fp);
@@ -349,7 +332,6 @@ jvmti_write_code(void *agent, char const *sym,
static int code_generation = 1;
struct jr_code_load rec;
size_t sym_len;
- size_t padding_count;
FILE *fp = agent;
int ret = -1;
@@ -366,8 +348,6 @@ jvmti_write_code(void *agent, char const *sym,
rec.p.id = JIT_CODE_LOAD;
rec.p.total_size = sizeof(rec) + sym_len;
- padding_count = PADDING_8ALIGNED(rec.p.total_size);
- rec.p. total_size += padding_count;
rec.p.timestamp = perf_get_timestamp();
rec.code_size = size;
@@ -393,9 +373,6 @@ jvmti_write_code(void *agent, char const *sym,
ret = fwrite_unlocked(&rec, sizeof(rec), 1, fp);
fwrite_unlocked(sym, sym_len, 1, fp);
- if (padding_count)
- fwrite_unlocked(pad_bytes, padding_count, 1, fp);
-
if (code)
fwrite_unlocked(code, size, 1, fp);
@@ -412,7 +389,6 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
{
struct jr_code_debug_info rec;
size_t sret, len, size, flen;
- size_t padding_count;
uint64_t addr;
const char *fn = file;
FILE *fp = agent;
@@ -443,16 +419,10 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
* int : line number
* int : column discriminator
* file[] : source file name
- * padding : pad to multiple of 8 bytes
*/
size += nr_lines * sizeof(struct debug_entry);
size += flen * nr_lines;
- /*
- * pad to 8 bytes
- */
- padding_count = PADDING_8ALIGNED(size);
-
- rec.p.total_size = size + padding_count;
+ rec.p.total_size = size;
/*
 * If JVM is multi-threaded, multiple concurrent calls to agent
@@ -486,12 +456,6 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
if (sret != 1)
goto error;
}
- if (padding_count) {
- sret = fwrite_unlocked(pad_bytes, padding_count, 1, fp);
- if (sret != 1)
- goto error;
- }
-
funlockfile(fp);
return 0;
error:
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index ac12e4b91a92..5612641c69b4 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -12,6 +12,19 @@
static int has_line_numbers;
void *jvmti_agent;
+static void print_error(jvmtiEnv *jvmti, const char *msg, jvmtiError ret)
+{
+ char *err_msg = NULL;
+ jvmtiError err;
+ err = (*jvmti)->GetErrorName(jvmti, ret, &err_msg);
+ if (err == JVMTI_ERROR_NONE) {
+ warnx("%s failed with %s", msg, err_msg);
+ (*jvmti)->Deallocate(jvmti, (unsigned char *)err_msg);
+ } else {
+ warnx("%s failed with an unknown error %d", msg, ret);
+ }
+}
+
static jvmtiError
do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
jvmti_line_info_t *tab, jint *nr)
@@ -22,8 +35,10 @@ do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
jvmtiError ret;
ret = (*jvmti)->GetLineNumberTable(jvmti, m, &nr_lines, &loc_tab);
- if (ret != JVMTI_ERROR_NONE)
+ if (ret != JVMTI_ERROR_NONE) {
+ print_error(jvmti, "GetLineNumberTable", ret);
return ret;
+ }
for (i = 0; i < nr_lines; i++) {
if (loc_tab[i].start_location < bci) {
@@ -71,6 +86,8 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
/* free what was allocated for nothing */
(*jvmti)->Deallocate(jvmti, (unsigned char *)lne);
nr_total += (int)nr;
+ } else {
+ print_error(jvmti, "GetLineNumberTable", ret);
}
}
}
@@ -130,7 +147,7 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
&decl_class);
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: cannot get declaring class");
+ print_error(jvmti, "GetMethodDeclaringClass", ret);
return;
}
@@ -144,21 +161,21 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: cannot get source filename ret=%d", ret);
+ print_error(jvmti, "GetSourceFileName", ret);
goto error;
}
ret = (*jvmti)->GetClassSignature(jvmti, decl_class,
&class_sign, NULL);
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: getclassignature failed");
+ print_error(jvmti, "GetClassSignature", ret);
goto error;
}
ret = (*jvmti)->GetMethodName(jvmti, method, &func_name,
&func_sign, NULL);
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: failed getmethodname");
+ print_error(jvmti, "GetMethodName", ret);
goto error;
}
@@ -253,7 +270,7 @@ Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __unused)
ret = (*jvmti)->AddCapabilities(jvmti, &caps1);
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: acquire compiled_method capability failed");
+ print_error(jvmti, "AddCapabilities", ret);
return -1;
}
ret = (*jvmti)->GetJLocationFormat(jvmti, &format);
@@ -264,7 +281,9 @@ Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __unused)
ret = (*jvmti)->AddCapabilities(jvmti, &caps1);
if (ret == JVMTI_ERROR_NONE)
has_line_numbers = 1;
- }
+ } else if (ret != JVMTI_ERROR_NONE)
+ print_error(jvmti, "GetJLocationFormat", ret);
+
memset(&cb, 0, sizeof(cb));
@@ -273,21 +292,21 @@ Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __unused)
ret = (*jvmti)->SetEventCallbacks(jvmti, &cb, sizeof(cb));
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: cannot set event callbacks");
+ print_error(jvmti, "SetEventCallbacks", ret);
return -1;
}
ret = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
JVMTI_EVENT_COMPILED_METHOD_LOAD, NULL);
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: setnotification failed for method_load");
+ print_error(jvmti, "SetEventNotificationMode(METHOD_LOAD)", ret);
return -1;
}
ret = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
JVMTI_EVENT_DYNAMIC_CODE_GENERATED, NULL);
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: setnotification failed on code_generated");
+ print_error(jvmti, "SetEventNotificationMode(CODE_GENERATED)", ret);
return -1;
}
return 0;
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 64c06961bfe4..aa23b3347d6b 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -43,6 +43,7 @@ static struct cmd_struct commands[] = {
{ "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
{ "config", cmd_config, 0 },
+ { "c2c", cmd_c2c, 0 },
{ "diff", cmd_diff, 0 },
{ "evlist", cmd_evlist, 0 },
{ "help", cmd_help, 0 },
diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
new file mode 100644
index 000000000000..e925baa0c30b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
@@ -0,0 +1,21 @@
+# Format:
+# PVR,Version,JSON/file/pathname,Type
+#
+# where
+# PVR Processor version
+# Version could be used to track the version of the JSON file
+# but currently unused.
+# JSON/file/pathname is the path to JSON file, relative
+# to tools/perf/pmu-events/arch/powerpc/.
+# Type is core, uncore etc
+#
+# Multiple PVRs could map to a single JSON file.
+#
+
+# Power8 entries
+004b0000,1,power8.json,core
+004b0201,1,power8.json,core
+004c0000,1,power8.json,core
+004d0000,1,power8.json,core
+004d0100,1,power8.json,core
+004d0200,1,power8.json,core
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/cache.json b/tools/perf/pmu-events/arch/powerpc/power8/cache.json
new file mode 100644
index 000000000000..4a3daa6b4b96
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/cache.json
@@ -0,0 +1,176 @@
+[
+ {,
+ "EventCode": "0x4c048",
+ "EventName": "PM_DATA_FROM_DL2L3_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x3c048",
+ "EventName": "PM_DATA_FROM_DL2L3_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x3c04c",
+ "EventName": "PM_DATA_FROM_DL4",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x1c042",
+ "EventName": "PM_DATA_FROM_L2",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x200fe",
+ "EventName": "PM_DATA_FROM_L2MISS",
+ "BriefDescription": "Demand LD - L2 Miss (not L2 hit)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1c04e",
+ "EventName": "PM_DATA_FROM_L2MISS_MOD",
+ "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x3c040",
+ "EventName": "PM_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x4c040",
+ "EventName": "PM_DATA_FROM_L2_DISP_CONFLICT_OTHER",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x2c040",
+ "EventName": "PM_DATA_FROM_L2_MEPF",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x1c040",
+ "EventName": "PM_DATA_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x4c042",
+ "EventName": "PM_DATA_FROM_L3",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x300fe",
+ "EventName": "PM_DATA_FROM_L3MISS",
+ "BriefDescription": "Demand LD - L3 Miss (not L2 hit and not L3 hit)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c04e",
+ "EventName": "PM_DATA_FROM_L3MISS_MOD",
+ "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x3c042",
+ "EventName": "PM_DATA_FROM_L3_DISP_CONFLICT",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x2c042",
+ "EventName": "PM_DATA_FROM_L3_MEPF",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x1c044",
+ "EventName": "PM_DATA_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x1c04c",
+ "EventName": "PM_DATA_FROM_LL4",
+ "BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x4c04a",
+ "EventName": "PM_DATA_FROM_OFF_CHIP_CACHE",
+ "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x1c048",
+ "EventName": "PM_DATA_FROM_ON_CHIP_CACHE",
+ "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x2c046",
+ "EventName": "PM_DATA_FROM_RL2L3_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x1c04a",
+ "EventName": "PM_DATA_FROM_RL2L3_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x3001a",
+ "EventName": "PM_DATA_TABLEWALK_CYC",
+ "BriefDescription": "Tablwalk Cycles (could be 1 or 2 active)",
+ "PublicDescription": "Data Tablewalk Active"
+ },
+ {,
+ "EventCode": "0x4e04e",
+ "EventName": "PM_DPTEG_FROM_L3MISS",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L3 due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xd094",
+ "EventName": "PM_DSLB_MISS",
+ "BriefDescription": "Data SLB Miss - Total of all segment sizes",
+ "PublicDescription": "Data SLB Miss - Total of all segment sizesData SLB misses"
+ },
+ {,
+ "EventCode": "0x1002c",
+ "EventName": "PM_L1_DCACHE_RELOADED_ALL",
+ "BriefDescription": "L1 data cache reloaded for demand or prefetch",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x300f6",
+ "EventName": "PM_L1_DCACHE_RELOAD_VALID",
+ "BriefDescription": "DL1 reloaded due to Demand Load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e054",
+ "EventName": "PM_LD_MISS_L1",
+ "BriefDescription": "Load Missed L1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x100ee",
+ "EventName": "PM_LD_REF_L1",
+ "BriefDescription": "All L1 D cache load references counted at finish, gated by reject",
+ "PublicDescription": "Load Ref count combined for all units"
+ },
+ {,
+ "EventCode": "0x300f0",
+ "EventName": "PM_ST_MISS_L1",
+ "BriefDescription": "Store Missed L1",
+ "PublicDescription": ""
+ },
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json b/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json
new file mode 100644
index 000000000000..5f1bb9fca40c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/floating-point.json
@@ -0,0 +1,14 @@
+[
+ {,
+ "EventCode": "0x2000e",
+ "EventName": "PM_FXU_BUSY",
+ "BriefDescription": "fxu0 busy and fxu1 busy",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1000e",
+ "EventName": "PM_FXU_IDLE",
+ "BriefDescription": "fxu0 idle and fxu1 idle",
+ "PublicDescription": ""
+ },
+]
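
Each entry in the files added above pairs a raw POWER8 PMU event code with a symbolic name and description; perf's jevents build step compiles these tables into event aliases that can then be requested by name. Note the stray comma after every opening brace ("{,"), which the in-tree tooling tolerates but a strict JSON parser rejects. Purely as an illustration (not part of the patch), a minimal Python sketch that reads one of these files with that quirk stripped and prints the name-to-code table could look as follows; the path is a hypothetical checkout-relative location.

import json
import re

# Hypothetical path inside a kernel checkout; adjust as needed.
PATH = "tools/perf/pmu-events/arch/powerpc/power8/floating-point.json"

def load_events(path):
    with open(path) as f:
        text = f.read()
    # Strict JSON rejects the "{," entries and the trailing comma before "]".
    text = re.sub(r"\{\s*,", "{", text)
    text = re.sub(r",\s*\]", "]", text)
    return json.loads(text)

if __name__ == "__main__":
    for ev in load_events(PATH):
        print(f'{ev["EventName"]:<20} code={ev["EventCode"]}  # {ev["BriefDescription"]}')

With the aliases built into perf, the same names should be usable directly on a POWER8 machine, e.g. something like perf stat -e pm_fxu_busy,pm_fxu_idle.
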
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/frontend.json b/tools/perf/pmu-events/arch/powerpc/power8/frontend.json
new file mode 100644
index 000000000000..04c5f1b7bee1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/frontend.json
@@ -0,0 +1,470 @@
+[
+ {,
+ "EventCode": "0x2505e",
+ "EventName": "PM_BACK_BR_CMPL",
+ "BriefDescription": "Branch instruction completed with a target address less than current instruction address",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10068",
+ "EventName": "PM_BRU_FIN",
+ "BriefDescription": "Branch Instruction Finished",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20036",
+ "EventName": "PM_BR_2PATH",
+ "BriefDescription": "two path branch",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40060",
+ "EventName": "PM_BR_CMPL",
+ "BriefDescription": "Branch Instruction completed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x400f6",
+ "EventName": "PM_BR_MPRED_CMPL",
+ "BriefDescription": "Number of Branch Mispredicts",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x200fa",
+ "EventName": "PM_BR_TAKEN_CMPL",
+ "BriefDescription": "New event for Branch Taken",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10018",
+ "EventName": "PM_IC_DEMAND_CYC",
+ "BriefDescription": "Cycles when a demand ifetch was pending",
+ "PublicDescription": "Demand ifetch pending"
+ },
+ {,
+ "EventCode": "0x100f6",
+ "EventName": "PM_IERAT_RELOAD",
+ "BriefDescription": "Number of I-ERAT reloads",
+ "PublicDescription": "IERAT Reloaded (Miss)"
+ },
+ {,
+ "EventCode": "0x4006a",
+ "EventName": "PM_IERAT_RELOAD_16M",
+ "BriefDescription": "IERAT Reloaded (Miss) for a 16M page",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20064",
+ "EventName": "PM_IERAT_RELOAD_4K",
+ "BriefDescription": "IERAT Miss (Not implemented as DI on POWER6)",
+ "PublicDescription": "IERAT Reloaded (Miss) for a 4k page"
+ },
+ {,
+ "EventCode": "0x3006a",
+ "EventName": "PM_IERAT_RELOAD_64K",
+ "BriefDescription": "IERAT Reloaded (Miss) for a 64k page",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x14050",
+ "EventName": "PM_INST_CHIP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for an instruction fetch",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for an instruction fetch"
+ },
+ {,
+ "EventCode": "0x2",
+ "EventName": "PM_INST_CMPL",
+ "BriefDescription": "Number of PowerPC Instructions that completed",
+ "PublicDescription": "PPC Instructions Finished (completed)"
+ },
+ {,
+ "EventCode": "0x200f2",
+ "EventName": "PM_INST_DISP",
+ "BriefDescription": "PPC Dispatched",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x44048",
+ "EventName": "PM_INST_FROM_DL2L3_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x34048",
+ "EventName": "PM_INST_FROM_DL2L3_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x3404c",
+ "EventName": "PM_INST_FROM_DL4",
+ "BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x4404c",
+ "EventName": "PM_INST_FROM_DMEM",
+ "BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x14042",
+ "EventName": "PM_INST_FROM_L2",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x1404e",
+ "EventName": "PM_INST_FROM_L2MISS",
+ "BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x34040",
+ "EventName": "PM_INST_FROM_L2_DISP_CONFLICT_LDHITST",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x44040",
+ "EventName": "PM_INST_FROM_L2_DISP_CONFLICT_OTHER",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x24040",
+ "EventName": "PM_INST_FROM_L2_MEPF",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x14040",
+ "EventName": "PM_INST_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x44042",
+ "EventName": "PM_INST_FROM_L3",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x300fa",
+ "EventName": "PM_INST_FROM_L3MISS",
+ "BriefDescription": "Marked instruction was reloaded from a location beyond the local chiplet",
+ "PublicDescription": "Inst from L3 miss"
+ },
+ {,
+ "EventCode": "0x4404e",
+ "EventName": "PM_INST_FROM_L3MISS_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to a instruction fetch",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x34042",
+ "EventName": "PM_INST_FROM_L3_DISP_CONFLICT",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x24042",
+ "EventName": "PM_INST_FROM_L3_MEPF",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x14044",
+ "EventName": "PM_INST_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x1404c",
+ "EventName": "PM_INST_FROM_LL4",
+ "BriefDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x24048",
+ "EventName": "PM_INST_FROM_LMEM",
+ "BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x2404c",
+ "EventName": "PM_INST_FROM_MEMORY",
+ "BriefDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x4404a",
+ "EventName": "PM_INST_FROM_OFF_CHIP_CACHE",
+ "BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x14048",
+ "EventName": "PM_INST_FROM_ON_CHIP_CACHE",
+ "BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x24046",
+ "EventName": "PM_INST_FROM_RL2L3_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x1404a",
+ "EventName": "PM_INST_FROM_RL2L3_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x2404a",
+ "EventName": "PM_INST_FROM_RL4",
+ "BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x3404a",
+ "EventName": "PM_INST_FROM_RMEM",
+ "BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x24050",
+ "EventName": "PM_INST_GRP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for an instruction fetch",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for an instruction fetch"
+ },
+ {,
+ "EventCode": "0x24052",
+ "EventName": "PM_INST_GRP_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for an instruction fetch",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
+ },
+ {,
+ "EventCode": "0x14052",
+ "EventName": "PM_INST_GRP_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for an instruction fetch",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor an instruction fetch"
+ },
+ {,
+ "EventCode": "0x1003a",
+ "EventName": "PM_INST_IMC_MATCH_CMPL",
+ "BriefDescription": "IMC Match Count ( Not architected in P8)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x14054",
+ "EventName": "PM_INST_PUMP_CPRED",
+ "BriefDescription": "Pump prediction correct. Counts across all types of pumps for an instruction fetch",
+ "PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor an instruction fetch"
+ },
+ {,
+ "EventCode": "0x44052",
+ "EventName": "PM_INST_PUMP_MPRED",
+ "BriefDescription": "Pump misprediction. Counts across all types of pumps for an instruction fetch",
+ "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor an instruction fetch"
+ },
+ {,
+ "EventCode": "0x34050",
+ "EventName": "PM_INST_SYS_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for an instruction fetch",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for an instruction fetch"
+ },
+ {,
+ "EventCode": "0x34052",
+ "EventName": "PM_INST_SYS_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for an instruction fetch",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
+ },
+ {,
+ "EventCode": "0x44050",
+ "EventName": "PM_INST_SYS_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for an instruction fetch",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for an instruction fetch"
+ },
+ {,
+ "EventCode": "0x45048",
+ "EventName": "PM_IPTEG_FROM_DL2L3_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x35048",
+ "EventName": "PM_IPTEG_FROM_DL2L3_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3504c",
+ "EventName": "PM_IPTEG_FROM_DL4",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4504c",
+ "EventName": "PM_IPTEG_FROM_DMEM",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15042",
+ "EventName": "PM_IPTEG_FROM_L2",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1504e",
+ "EventName": "PM_IPTEG_FROM_L2MISS",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L2 due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x25040",
+ "EventName": "PM_IPTEG_FROM_L2_MEPF",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15040",
+ "EventName": "PM_IPTEG_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x45042",
+ "EventName": "PM_IPTEG_FROM_L3",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4504e",
+ "EventName": "PM_IPTEG_FROM_L3MISS",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L3 due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x35042",
+ "EventName": "PM_IPTEG_FROM_L3_DISP_CONFLICT",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x25042",
+ "EventName": "PM_IPTEG_FROM_L3_MEPF",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15044",
+ "EventName": "PM_IPTEG_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1504c",
+ "EventName": "PM_IPTEG_FROM_LL4",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x25048",
+ "EventName": "PM_IPTEG_FROM_LMEM",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2504c",
+ "EventName": "PM_IPTEG_FROM_MEMORY",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4504a",
+ "EventName": "PM_IPTEG_FROM_OFF_CHIP_CACHE",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15048",
+ "EventName": "PM_IPTEG_FROM_ON_CHIP_CACHE",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x25046",
+ "EventName": "PM_IPTEG_FROM_RL2L3_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1504a",
+ "EventName": "PM_IPTEG_FROM_RL2L3_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2504a",
+ "EventName": "PM_IPTEG_FROM_RL4",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3504a",
+ "EventName": "PM_IPTEG_FROM_RMEM",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xd096",
+ "EventName": "PM_ISLB_MISS",
+ "BriefDescription": "I SLB Miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x400fc",
+ "EventName": "PM_ITLB_MISS",
+ "BriefDescription": "ITLB Reloaded (always zero on POWER6)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x200fd",
+ "EventName": "PM_L1_ICACHE_MISS",
+ "BriefDescription": "Demand iCache Miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40012",
+ "EventName": "PM_L1_ICACHE_RELOADED_ALL",
+ "BriefDescription": "Counts all Icache reloads includes demand, prefetchm prefetch turned into demand and demand turned into prefetch",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30068",
+ "EventName": "PM_L1_ICACHE_RELOADED_PREF",
+ "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x300f4",
+ "EventName": "PM_THRD_CONC_RUN_INST",
+ "BriefDescription": "PPC Instructions Finished when both threads in run_cycles",
+ "PublicDescription": "Concurrent Run Instructions"
+ },
+ {,
+ "EventCode": "0x30060",
+ "EventName": "PM_TM_TRANS_RUN_INST",
+ "BriefDescription": "Instructions completed in transactional state",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e014",
+ "EventName": "PM_TM_TX_PASS_RUN_INST",
+ "BriefDescription": "run instructions spent in successful transactions",
+ "PublicDescription": ""
+ },
+]
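
The frontend counters above are usually consumed as ratios rather than raw totals: PM_BR_MPRED_CMPL over PM_BR_CMPL approximates a branch misprediction rate, and PM_L1_ICACHE_MISS over PM_INST_CMPL gives icache misses per completed instruction. A small post-processing sketch, assuming the totals were collected elsewhere (the sample numbers below are placeholders, not measurements):

# Sketch: derive common frontend ratios from raw counter totals.
# Keys match EventName fields above; the values are made-up placeholders.
def frontend_ratios(counts):
    branches = counts["PM_BR_CMPL"]
    instructions = counts["PM_INST_CMPL"]
    return {
        "branch_mispredict_rate": counts["PM_BR_MPRED_CMPL"] / branches if branches else 0.0,
        "icache_miss_per_inst": counts["PM_L1_ICACHE_MISS"] / instructions if instructions else 0.0,
    }

example = {
    "PM_BR_CMPL": 1_000_000,
    "PM_BR_MPRED_CMPL": 42_000,
    "PM_L1_ICACHE_MISS": 7_500,
    "PM_INST_CMPL": 5_000_000,
}
print(frontend_ratios(example))
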
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/marked.json b/tools/perf/pmu-events/arch/powerpc/power8/marked.json
new file mode 100644
index 000000000000..dcdcede3c3fe
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/marked.json
@@ -0,0 +1,794 @@
+[
+ {,
+ "EventCode": "0x3515e",
+ "EventName": "PM_MRK_BACK_BR_CMPL",
+ "BriefDescription": "Marked branch instruction completed with a target address less than current instruction address",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2013a",
+ "EventName": "PM_MRK_BRU_FIN",
+ "BriefDescription": "bru marked instr finish",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1016e",
+ "EventName": "PM_MRK_BR_CMPL",
+ "BriefDescription": "Branch Instruction completed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x301e4",
+ "EventName": "PM_MRK_BR_MPRED_CMPL",
+ "BriefDescription": "Marked Branch Mispredicted",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x101e2",
+ "EventName": "PM_MRK_BR_TAKEN_CMPL",
+ "BriefDescription": "Marked Branch Taken completed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d148",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d128",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3_MOD_CYC",
+ "BriefDescription": "Duration in cycles to reload with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d148",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c128",
+ "EventName": "PM_MRK_DATA_FROM_DL2L3_SHR_CYC",
+ "BriefDescription": "Duration in cycles to reload with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d14c",
+ "EventName": "PM_MRK_DATA_FROM_DL4",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c12c",
+ "EventName": "PM_MRK_DATA_FROM_DL4_CYC",
+ "BriefDescription": "Duration in cycles to reload from another chip's L4 on a different Node or Group (Distant) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d14c",
+ "EventName": "PM_MRK_DATA_FROM_DMEM",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d12c",
+ "EventName": "PM_MRK_DATA_FROM_DMEM_CYC",
+ "BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group (Distant) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d142",
+ "EventName": "PM_MRK_DATA_FROM_L2",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d14e",
+ "EventName": "PM_MRK_DATA_FROM_L2MISS",
+ "BriefDescription": "Data cache reload L2 miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c12e",
+ "EventName": "PM_MRK_DATA_FROM_L2MISS_CYC",
+ "BriefDescription": "Duration in cycles to reload from a localtion other than the local core's L2 due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c122",
+ "EventName": "PM_MRK_DATA_FROM_L2_CYC",
+ "BriefDescription": "Duration in cycles to reload from local core's L2 due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d140",
+ "EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c120",
+ "EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_LDHITST_CYC",
+ "BriefDescription": "Duration in cycles to reload from local core's L2 with load hit store conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d140",
+ "EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d120",
+ "EventName": "PM_MRK_DATA_FROM_L2_DISP_CONFLICT_OTHER_CYC",
+ "BriefDescription": "Duration in cycles to reload from local core's L2 with dispatch conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d140",
+ "EventName": "PM_MRK_DATA_FROM_L2_MEPF",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d120",
+ "EventName": "PM_MRK_DATA_FROM_L2_MEPF_CYC",
+ "BriefDescription": "Duration in cycles to reload from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d140",
+ "EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c120",
+ "EventName": "PM_MRK_DATA_FROM_L2_NO_CONFLICT_CYC",
+ "BriefDescription": "Duration in cycles to reload from local core's L2 without conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d142",
+ "EventName": "PM_MRK_DATA_FROM_L3",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x201e4",
+ "EventName": "PM_MRK_DATA_FROM_L3MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d12e",
+ "EventName": "PM_MRK_DATA_FROM_L3MISS_CYC",
+ "BriefDescription": "Duration in cycles to reload from a localtion other than the local core's L3 due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d122",
+ "EventName": "PM_MRK_DATA_FROM_L3_CYC",
+ "BriefDescription": "Duration in cycles to reload from local core's L3 due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d142",
+ "EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c122",
+ "EventName": "PM_MRK_DATA_FROM_L3_DISP_CONFLICT_CYC",
+ "BriefDescription": "Duration in cycles to reload from local core's L3 with dispatch conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d142",
+ "EventName": "PM_MRK_DATA_FROM_L3_MEPF",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d122",
+ "EventName": "PM_MRK_DATA_FROM_L3_MEPF_CYC",
+ "BriefDescription": "Duration in cycles to reload from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d144",
+ "EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c124",
+ "EventName": "PM_MRK_DATA_FROM_L3_NO_CONFLICT_CYC",
+ "BriefDescription": "Duration in cycles to reload from local core's L3 without conflict due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d14c",
+ "EventName": "PM_MRK_DATA_FROM_LL4",
+ "BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c12c",
+ "EventName": "PM_MRK_DATA_FROM_LL4_CYC",
+ "BriefDescription": "Duration in cycles to reload from the local chip's L4 cache due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d148",
+ "EventName": "PM_MRK_DATA_FROM_LMEM",
+ "BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d128",
+ "EventName": "PM_MRK_DATA_FROM_LMEM_CYC",
+ "BriefDescription": "Duration in cycles to reload from the local chip's Memory due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d14c",
+ "EventName": "PM_MRK_DATA_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d12c",
+ "EventName": "PM_MRK_DATA_FROM_MEMORY_CYC",
+ "BriefDescription": "Duration in cycles to reload from a memory location including L4 from local remote or distant due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d14a",
+ "EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE",
+ "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d12a",
+ "EventName": "PM_MRK_DATA_FROM_OFF_CHIP_CACHE_CYC",
+ "BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d148",
+ "EventName": "PM_MRK_DATA_FROM_ON_CHIP_CACHE",
+ "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c128",
+ "EventName": "PM_MRK_DATA_FROM_ON_CHIP_CACHE_CYC",
+ "BriefDescription": "Duration in cycles to reload either shared or modified data from another core's L2/L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d146",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d126",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3_MOD_CYC",
+ "BriefDescription": "Duration in cycles to reload with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d14a",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c12a",
+ "EventName": "PM_MRK_DATA_FROM_RL2L3_SHR_CYC",
+ "BriefDescription": "Duration in cycles to reload with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d14a",
+ "EventName": "PM_MRK_DATA_FROM_RL4",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d12a",
+ "EventName": "PM_MRK_DATA_FROM_RL4_CYC",
+ "BriefDescription": "Duration in cycles to reload from another chip's L4 on the same Node or Group ( Remote) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d14a",
+ "EventName": "PM_MRK_DATA_FROM_RMEM",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c12a",
+ "EventName": "PM_MRK_DATA_FROM_RMEM_CYC",
+ "BriefDescription": "Duration in cycles to reload from another chip's memory on the same Node or Group ( Remote) due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40118",
+ "EventName": "PM_MRK_DCACHE_RELOAD_INTV",
+ "BriefDescription": "Combined Intervention event",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x301e6",
+ "EventName": "PM_MRK_DERAT_MISS",
+ "BriefDescription": "Erat Miss (TLB Access) All page sizes",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d154",
+ "EventName": "PM_MRK_DERAT_MISS_16G",
+ "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16G",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d154",
+ "EventName": "PM_MRK_DERAT_MISS_16M",
+ "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 16M",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d156",
+ "EventName": "PM_MRK_DERAT_MISS_4K",
+ "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 4K",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d154",
+ "EventName": "PM_MRK_DERAT_MISS_64K",
+ "BriefDescription": "Marked Data ERAT Miss (Data TLB Access) page size 64K",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20132",
+ "EventName": "PM_MRK_DFU_FIN",
+ "BriefDescription": "Decimal Unit marked Instruction Finish",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f148",
+ "EventName": "PM_MRK_DPTEG_FROM_DL2L3_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3f148",
+ "EventName": "PM_MRK_DPTEG_FROM_DL2L3_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3f14c",
+ "EventName": "PM_MRK_DPTEG_FROM_DL4",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f14c",
+ "EventName": "PM_MRK_DPTEG_FROM_DMEM",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1f142",
+ "EventName": "PM_MRK_DPTEG_FROM_L2",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1f14e",
+ "EventName": "PM_MRK_DPTEG_FROM_L2MISS",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L2 due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2f140",
+ "EventName": "PM_MRK_DPTEG_FROM_L2_MEPF",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1f140",
+ "EventName": "PM_MRK_DPTEG_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f142",
+ "EventName": "PM_MRK_DPTEG_FROM_L3",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f14e",
+ "EventName": "PM_MRK_DPTEG_FROM_L3MISS",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L3 due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3f142",
+ "EventName": "PM_MRK_DPTEG_FROM_L3_DISP_CONFLICT",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2f142",
+ "EventName": "PM_MRK_DPTEG_FROM_L3_MEPF",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1f144",
+ "EventName": "PM_MRK_DPTEG_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1f14c",
+ "EventName": "PM_MRK_DPTEG_FROM_LL4",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2f148",
+ "EventName": "PM_MRK_DPTEG_FROM_LMEM",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2f14c",
+ "EventName": "PM_MRK_DPTEG_FROM_MEMORY",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f14a",
+ "EventName": "PM_MRK_DPTEG_FROM_OFF_CHIP_CACHE",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1f148",
+ "EventName": "PM_MRK_DPTEG_FROM_ON_CHIP_CACHE",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2f146",
+ "EventName": "PM_MRK_DPTEG_FROM_RL2L3_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1f14a",
+ "EventName": "PM_MRK_DPTEG_FROM_RL2L3_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2f14a",
+ "EventName": "PM_MRK_DPTEG_FROM_RL4",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3f14a",
+ "EventName": "PM_MRK_DPTEG_FROM_RMEM",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x401e4",
+ "EventName": "PM_MRK_DTLB_MISS",
+ "BriefDescription": "Marked dtlb miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d158",
+ "EventName": "PM_MRK_DTLB_MISS_16G",
+ "BriefDescription": "Marked Data TLB Miss page size 16G",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d156",
+ "EventName": "PM_MRK_DTLB_MISS_16M",
+ "BriefDescription": "Marked Data TLB Miss page size 16M",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d156",
+ "EventName": "PM_MRK_DTLB_MISS_4K",
+ "BriefDescription": "Marked Data TLB Miss page size 4k",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d156",
+ "EventName": "PM_MRK_DTLB_MISS_64K",
+ "BriefDescription": "Marked Data TLB Miss page size 64K",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40154",
+ "EventName": "PM_MRK_FAB_RSP_BKILL",
+ "BriefDescription": "Marked store had to do a bkill",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2f150",
+ "EventName": "PM_MRK_FAB_RSP_BKILL_CYC",
+ "BriefDescription": "cycles L2 RC took for a bkill",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3015e",
+ "EventName": "PM_MRK_FAB_RSP_CLAIM_RTY",
+ "BriefDescription": "Sampled store did a rwitm and got a rty",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30154",
+ "EventName": "PM_MRK_FAB_RSP_DCLAIM",
+ "BriefDescription": "Marked store had to do a dclaim",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2f152",
+ "EventName": "PM_MRK_FAB_RSP_DCLAIM_CYC",
+ "BriefDescription": "cycles L2 RC took for a dclaim",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4015e",
+ "EventName": "PM_MRK_FAB_RSP_RD_RTY",
+ "BriefDescription": "Sampled L2 reads retry count",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1015e",
+ "EventName": "PM_MRK_FAB_RSP_RD_T_INTV",
+ "BriefDescription": "Sampled Read got a T intervention",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f150",
+ "EventName": "PM_MRK_FAB_RSP_RWITM_CYC",
+ "BriefDescription": "cycles L2 RC took for a rwitm",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2015e",
+ "EventName": "PM_MRK_FAB_RSP_RWITM_RTY",
+ "BriefDescription": "Sampled store did a rwitm and got a rty",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20134",
+ "EventName": "PM_MRK_FXU_FIN",
+ "BriefDescription": "fxu marked instr finish",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x401e0",
+ "EventName": "PM_MRK_INST_CMPL",
+ "BriefDescription": "marked instruction completed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20130",
+ "EventName": "PM_MRK_INST_DECODED",
+ "BriefDescription": "marked instruction decoded",
+ "PublicDescription": "marked instruction decoded. Name from ISU?"
+ },
+ {,
+ "EventCode": "0x101e0",
+ "EventName": "PM_MRK_INST_DISP",
+ "BriefDescription": "The thread has dispatched a randomly sampled marked instruction",
+ "PublicDescription": "Marked Instruction dispatched"
+ },
+ {,
+ "EventCode": "0x30130",
+ "EventName": "PM_MRK_INST_FIN",
+ "BriefDescription": "marked instruction finished",
+ "PublicDescription": "marked instr finish any unit"
+ },
+ {,
+ "EventCode": "0x401e6",
+ "EventName": "PM_MRK_INST_FROM_L3MISS",
+ "BriefDescription": "Marked instruction was reloaded from a location beyond the local chiplet",
+ "PublicDescription": "n/a"
+ },
+ {,
+ "EventCode": "0x10132",
+ "EventName": "PM_MRK_INST_ISSUED",
+ "BriefDescription": "Marked instruction issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40134",
+ "EventName": "PM_MRK_INST_TIMEO",
+ "BriefDescription": "marked Instruction finish timeout (instruction lost)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x101e4",
+ "EventName": "PM_MRK_L1_ICACHE_MISS",
+ "BriefDescription": "sampled Instruction suffered an icache Miss",
+ "PublicDescription": "Marked L1 Icache Miss"
+ },
+ {,
+ "EventCode": "0x101ea",
+ "EventName": "PM_MRK_L1_RELOAD_VALID",
+ "BriefDescription": "Marked demand reload",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20114",
+ "EventName": "PM_MRK_L2_RC_DISP",
+ "BriefDescription": "Marked Instruction RC dispatched in L2",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3012a",
+ "EventName": "PM_MRK_L2_RC_DONE",
+ "BriefDescription": "Marked RC done",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40116",
+ "EventName": "PM_MRK_LARX_FIN",
+ "BriefDescription": "Larx finished",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1013e",
+ "EventName": "PM_MRK_LD_MISS_EXPOSED_CYC",
+ "BriefDescription": "Marked Load exposed Miss cycles",
+ "PublicDescription": "Marked Load exposed Miss (use edge detect to count #)"
+ },
+ {,
+ "EventCode": "0x201e2",
+ "EventName": "PM_MRK_LD_MISS_L1",
+ "BriefDescription": "Marked DL1 Demand Miss counted at exec time",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4013e",
+ "EventName": "PM_MRK_LD_MISS_L1_CYC",
+ "BriefDescription": "Marked ld latency",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40132",
+ "EventName": "PM_MRK_LSU_FIN",
+ "BriefDescription": "lsu marked instr finish",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20112",
+ "EventName": "PM_MRK_NTF_FIN",
+ "BriefDescription": "Marked next to finish instruction finished",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d15e",
+ "EventName": "PM_MRK_RUN_CYC",
+ "BriefDescription": "Marked run cycles",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3013e",
+ "EventName": "PM_MRK_STALL_CMPLU_CYC",
+ "BriefDescription": "Marked Group completion Stall",
+ "PublicDescription": "Marked Group Completion Stall cycles (use edge detect to count #)"
+ },
+ {,
+ "EventCode": "0x3e158",
+ "EventName": "PM_MRK_STCX_FAIL",
+ "BriefDescription": "marked stcx failed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10134",
+ "EventName": "PM_MRK_ST_CMPL",
+ "BriefDescription": "marked store completed and sent to nest",
+ "PublicDescription": "Marked store completed"
+ },
+ {,
+ "EventCode": "0x30134",
+ "EventName": "PM_MRK_ST_CMPL_INT",
+ "BriefDescription": "marked store finished with intervention",
+ "PublicDescription": "marked store complete (data home) with intervention"
+ },
+ {,
+ "EventCode": "0x3f150",
+ "EventName": "PM_MRK_ST_DRAIN_TO_L2DISP_CYC",
+ "BriefDescription": "cycles to drain st from core to L2",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3012c",
+ "EventName": "PM_MRK_ST_FWD",
+ "BriefDescription": "Marked st forwards",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1f150",
+ "EventName": "PM_MRK_ST_L2DISP_TO_CMPL_CYC",
+ "BriefDescription": "cycles from L2 rc disp to l2 rc completion",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20138",
+ "EventName": "PM_MRK_ST_NEST",
+ "BriefDescription": "Marked store sent to nest",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30132",
+ "EventName": "PM_MRK_VSU_FIN",
+ "BriefDescription": "VSU marked instr finish",
+ "PublicDescription": "vsu (fpu) marked instr finish"
+ },
+ {,
+ "EventCode": "0x3d15e",
+ "EventName": "PM_MULT_MRK",
+ "BriefDescription": "mult marked instr",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15152",
+ "EventName": "PM_SYNC_MRK_BR_LINK",
+ "BriefDescription": "Marked Branch and link branch that can cause a synchronous interrupt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1515c",
+ "EventName": "PM_SYNC_MRK_BR_MPRED",
+ "BriefDescription": "Marked Branch mispredict that can cause a synchronous interrupt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15156",
+ "EventName": "PM_SYNC_MRK_FX_DIVIDE",
+ "BriefDescription": "Marked fixed point divide that can cause a synchronous interrupt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15158",
+ "EventName": "PM_SYNC_MRK_L2HIT",
+ "BriefDescription": "Marked L2 Hits that can throw a synchronous interrupt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1515a",
+ "EventName": "PM_SYNC_MRK_L2MISS",
+ "BriefDescription": "Marked L2 Miss that can throw a synchronous interrupt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15154",
+ "EventName": "PM_SYNC_MRK_L3MISS",
+ "BriefDescription": "Marked L3 misses that can throw a synchronous interrupt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15150",
+ "EventName": "PM_SYNC_MRK_PROBE_NOP",
+ "BriefDescription": "Marked probeNops which can cause synchronous interrupts",
+ "PublicDescription": ""
+ },
+]
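
Many of the marked-load events in marked.json come in pairs: a count of reloads from a given source (for example PM_MRK_DATA_FROM_L2) and the cycles those reloads took (PM_MRK_DATA_FROM_L2_CYC). Dividing the cycle total by the occurrence count gives a rough average reload latency for marked loads from that source. A minimal sketch, again assuming the totals come from an existing measurement (the sample values are placeholders):

# Sketch: approximate average reload latency per source for marked loads.
# Follows the naming convention above: <event> counts occurrences,
# <event>_CYC counts the cycles spent on them.
def avg_reload_latency(counts, source_event):
    n = counts.get(source_event, 0)
    cyc = counts.get(source_event + "_CYC", 0)
    return cyc / n if n else float("nan")

sample = {"PM_MRK_DATA_FROM_L2": 1200, "PM_MRK_DATA_FROM_L2_CYC": 14400}  # placeholder totals
print(avg_reload_latency(sample, "PM_MRK_DATA_FROM_L2"))  # -> 12.0 cycles on this made-up data
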
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/memory.json b/tools/perf/pmu-events/arch/powerpc/power8/memory.json
new file mode 100644
index 000000000000..87cdaadba7bd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/memory.json
@@ -0,0 +1,212 @@
+[
+ {,
+ "EventCode": "0x10050",
+ "EventName": "PM_CHIP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for all data types ( demand load,data,inst prefetch,inst fetch,xlate (I or d)"
+ },
+ {,
+ "EventCode": "0x1c050",
+ "EventName": "PM_DATA_CHIP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for a demand load",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for a demand load"
+ },
+ {,
+ "EventCode": "0x4c04c",
+ "EventName": "PM_DATA_FROM_DMEM",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x2c048",
+ "EventName": "PM_DATA_FROM_LMEM",
+ "BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from the local chip's Memory due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x2c04c",
+ "EventName": "PM_DATA_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x2c04a",
+ "EventName": "PM_DATA_FROM_RL4",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x3c04a",
+ "EventName": "PM_DATA_FROM_RMEM",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x2c050",
+ "EventName": "PM_DATA_GRP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for a demand load",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for a demand load"
+ },
+ {,
+ "EventCode": "0x2c052",
+ "EventName": "PM_DATA_GRP_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for a demand load",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
+ },
+ {,
+ "EventCode": "0x1c052",
+ "EventName": "PM_DATA_GRP_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for a demand load",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor a demand load"
+ },
+ {,
+ "EventCode": "0x1c054",
+ "EventName": "PM_DATA_PUMP_CPRED",
+ "BriefDescription": "Pump prediction correct. Counts across all types of pumps for a demand load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c052",
+ "EventName": "PM_DATA_PUMP_MPRED",
+ "BriefDescription": "Pump misprediction. Counts across all types of pumps for a demand load",
+ "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor a demand load"
+ },
+ {,
+ "EventCode": "0x3c050",
+ "EventName": "PM_DATA_SYS_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for a demand load",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for a demand load"
+ },
+ {,
+ "EventCode": "0x3c052",
+ "EventName": "PM_DATA_SYS_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for a demand load",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
+ },
+ {,
+ "EventCode": "0x4c050",
+ "EventName": "PM_DATA_SYS_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for a demand load",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for a demand load"
+ },
+ {,
+ "EventCode": "0x3e04c",
+ "EventName": "PM_DPTEG_FROM_DL4",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on a different Node or Group (Distant) due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e04c",
+ "EventName": "PM_DPTEG_FROM_DMEM",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group (Distant) due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e04a",
+ "EventName": "PM_DPTEG_FROM_RMEM",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's memory on the same Node or Group ( Remote) due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20050",
+ "EventName": "PM_GRP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20052",
+ "EventName": "PM_GRP_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
+ },
+ {,
+ "EventCode": "0x10052",
+ "EventName": "PM_GRP_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x18082",
+ "EventName": "PM_L3_CO_MEPF",
+ "BriefDescription": "L3 CO of line in Mep state ( includes casthrough",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c058",
+ "EventName": "PM_MEM_CO",
+ "BriefDescription": "Memory castouts from this lpar",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10058",
+ "EventName": "PM_MEM_LOC_THRESH_IFU",
+ "BriefDescription": "Local Memory above threshold for IFU speculation control",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40056",
+ "EventName": "PM_MEM_LOC_THRESH_LSU_HIGH",
+ "BriefDescription": "Local memory above threshold for LSU medium",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1c05e",
+ "EventName": "PM_MEM_LOC_THRESH_LSU_MED",
+ "BriefDescription": "Local memory above theshold for data prefetch",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c058",
+ "EventName": "PM_MEM_PREF",
+ "BriefDescription": "Memory prefetch for this lpar. Includes L4",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10056",
+ "EventName": "PM_MEM_READ",
+ "BriefDescription": "Reads from Memory from this lpar (includes data/inst/xlate/l1prefetch/inst prefetch). Includes L4",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3c05e",
+ "EventName": "PM_MEM_RWITM",
+ "BriefDescription": "Memory rwitm for this lpar",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3006e",
+ "EventName": "PM_NEST_REF_CLK",
+ "BriefDescription": "Multiply by 4 to obtain the number of PB cycles",
+ "PublicDescription": "Nest reference clocks"
+ },
+ {,
+ "EventCode": "0x10054",
+ "EventName": "PM_PUMP_CPRED",
+ "BriefDescription": "Pump prediction correct. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x40052",
+ "EventName": "PM_PUMP_MPRED",
+ "BriefDescription": "Pump misprediction. Counts across all types of pumps for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x30050",
+ "EventName": "PM_SYS_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x30052",
+ "EventName": "PM_SYS_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
+ },
+ {,
+ "EventCode": "0x40050",
+ "EventName": "PM_SYS_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+]
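Once the pmu-events tables are regenerated and perf is rebuilt, these memory events should appear under perf list on a POWER8 host and be usable by name, for example something like perf stat -e pm_data_from_lmem -e pm_data_from_rmem <workload> to compare local versus remote memory reloads. The exact alias spelling comes from the generated tables, so treat that command as an assumption rather than verified output.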
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
new file mode 100644
index 000000000000..704302c3e67d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -0,0 +1,4064 @@
+[
+ {,
+ "EventCode": "0x1f05e",
+ "EventName": "PM_1LPAR_CYC",
+ "BriefDescription": "Number of cycles in single lpar mode. All threads in the core are assigned to the same lpar",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2006e",
+ "EventName": "PM_2LPAR_CYC",
+ "BriefDescription": "Cycles in 2-lpar mode. Threads 0-3 belong to Lpar0 and threads 4-7 belong to Lpar1",
+ "PublicDescription": "Number of cycles in 2 lpar mode"
+ },
+ {,
+ "EventCode": "0x4e05e",
+ "EventName": "PM_4LPAR_CYC",
+ "BriefDescription": "Number of cycles in 4 LPAR mode. Threads 0-1 belong to lpar0, threads 2-3 belong to lpar1, threads 4-5 belong to lpar2, and threads 6-7 belong to lpar3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x610050",
+ "EventName": "PM_ALL_CHIP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for all data types ( demand load,data,inst prefetch,inst fetch,xlate (I or d)"
+ },
+ {,
+ "EventCode": "0x520050",
+ "EventName": "PM_ALL_GRP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x620052",
+ "EventName": "PM_ALL_GRP_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
+ },
+ {,
+ "EventCode": "0x610052",
+ "EventName": "PM_ALL_GRP_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x610054",
+ "EventName": "PM_ALL_PUMP_CPRED",
+ "BriefDescription": "Pump prediction correct. Counts across all types of pumps for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x640052",
+ "EventName": "PM_ALL_PUMP_MPRED",
+ "BriefDescription": "Pump misprediction. Counts across all types of pumps for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x630050",
+ "EventName": "PM_ALL_SYS_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was system pump for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x630052",
+ "EventName": "PM_ALL_SYS_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
+ },
+ {,
+ "EventCode": "0x640050",
+ "EventName": "PM_ALL_SYS_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for all data types (demand load,data prefetch,inst prefetch,inst fetch,xlate)",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for all data types excluding data prefetch (demand load,inst prefetch,inst fetch,xlate)"
+ },
+ {,
+ "EventCode": "0x4082",
+ "EventName": "PM_BANK_CONFLICT",
+ "BriefDescription": "Read blocked due to interleave conflict. The ifar logic will detect an interleave conflict and kill the data that was read that cycle",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x5086",
+ "EventName": "PM_BR_BC_8",
+ "BriefDescription": "Pairable BC+8 branch that has not been converted to a Resolve Finished in the BRU pipeline",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x5084",
+ "EventName": "PM_BR_BC_8_CONV",
+ "BriefDescription": "Pairable BC+8 branch that was converted to a Resolve Finished in the BRU pipeline",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40ac",
+ "EventName": "PM_BR_MPRED_CCACHE",
+ "BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Count Cache Target Prediction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40b8",
+ "EventName": "PM_BR_MPRED_CR",
+ "BriefDescription": "Conditional Branch Completed that was Mispredicted due to the BHT Direction Prediction (taken/not taken)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40ae",
+ "EventName": "PM_BR_MPRED_LSTACK",
+ "BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Link Stack Target Prediction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40ba",
+ "EventName": "PM_BR_MPRED_TA",
+ "BriefDescription": "Conditional Branch Completed that was Mispredicted due to the Target Address Prediction from the Count Cache or Link Stack. Only XL-form branches that resolved Taken set this event",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10138",
+ "EventName": "PM_BR_MRK_2PATH",
+ "BriefDescription": "marked two path branch",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x409c",
+ "EventName": "PM_BR_PRED_BR0",
+ "BriefDescription": "Conditional Branch Completed on BR0 (1st branch in group) in which the HW predicted the Direction or Target",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x409e",
+ "EventName": "PM_BR_PRED_BR1",
+ "BriefDescription": "Conditional Branch Completed on BR1 (2nd branch in group) in which the HW predicted the Direction or Target. Note: BR1 can only be used in Single Thread Mode. In all of the SMT modes, only one branch can complete, thus BR1 is unused",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x489c",
+ "EventName": "PM_BR_PRED_BR_CMPL",
+ "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0) OR if_pc_br0_br_pred(1)",
+ "PublicDescription": "IFU"
+ },
+ {,
+ "EventCode": "0x40a4",
+ "EventName": "PM_BR_PRED_CCACHE_BR0",
+ "BriefDescription": "Conditional Branch Completed on BR0 that used the Count Cache for Target Prediction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40a6",
+ "EventName": "PM_BR_PRED_CCACHE_BR1",
+ "BriefDescription": "Conditional Branch Completed on BR1 that used the Count Cache for Target Prediction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x48a4",
+ "EventName": "PM_BR_PRED_CCACHE_CMPL",
+ "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0) AND if_pc_br0_pred_type",
+ "PublicDescription": "IFU"
+ },
+ {,
+ "EventCode": "0x40b0",
+ "EventName": "PM_BR_PRED_CR_BR0",
+ "BriefDescription": "Conditional Branch Completed on BR0 that had its direction predicted. I-form branches do not set this event. In addition, B-form branches which do not use the BHT do not set this event - these are branches with BO-field set to 'always taken' and branches",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40b2",
+ "EventName": "PM_BR_PRED_CR_BR1",
+ "BriefDescription": "Conditional Branch Completed on BR1 that had its direction predicted. I-form branches do not set this event. In addition, B-form branches which do not use the BHT do not set this event - these are branches with BO-field set to 'always taken' and branches",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x48b0",
+ "EventName": "PM_BR_PRED_CR_CMPL",
+ "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(1)='1'",
+ "PublicDescription": "IFU"
+ },
+ {,
+ "EventCode": "0x40a8",
+ "EventName": "PM_BR_PRED_LSTACK_BR0",
+ "BriefDescription": "Conditional Branch Completed on BR0 that used the Link Stack for Target Prediction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40aa",
+ "EventName": "PM_BR_PRED_LSTACK_BR1",
+ "BriefDescription": "Conditional Branch Completed on BR1 that used the Link Stack for Target Prediction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x48a8",
+ "EventName": "PM_BR_PRED_LSTACK_CMPL",
+ "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0) AND (not if_pc_br0_pred_type)",
+ "PublicDescription": "IFU"
+ },
+ {,
+ "EventCode": "0x40b4",
+ "EventName": "PM_BR_PRED_TA_BR0",
+ "BriefDescription": "Conditional Branch Completed on BR0 that had its target address predicted. Only XL-form branches set this event",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40b6",
+ "EventName": "PM_BR_PRED_TA_BR1",
+ "BriefDescription": "Conditional Branch Completed on BR1 that had its target address predicted. Only XL-form branches set this event",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x48b4",
+ "EventName": "PM_BR_PRED_TA_CMPL",
+ "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred(0)='1'",
+ "PublicDescription": "IFU"
+ },
+ {,
+ "EventCode": "0x40a0",
+ "EventName": "PM_BR_UNCOND_BR0",
+ "BriefDescription": "Unconditional Branch Completed on BR0. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was coverted to a Resolve",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40a2",
+ "EventName": "PM_BR_UNCOND_BR1",
+ "BriefDescription": "Unconditional Branch Completed on BR1. HW branch prediction was not used for this branch. This can be an I-form branch, a B-form branch with BO-field set to branch always, or a B-form branch which was coverted to a Resolve",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x48a0",
+ "EventName": "PM_BR_UNCOND_CMPL",
+ "BriefDescription": "Completion Time Event. This event can also be calculated from the direct bus as follows: if_pc_br0_br_pred=00 AND if_pc_br0_completed",
+ "PublicDescription": "IFU"
+ },
+ {,
+ "EventCode": "0x3094",
+ "EventName": "PM_CASTOUT_ISSUED",
+ "BriefDescription": "Castouts issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3096",
+ "EventName": "PM_CASTOUT_ISSUED_GPR",
+ "BriefDescription": "Castouts issued GPR",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2090",
+ "EventName": "PM_CLB_HELD",
+ "BriefDescription": "CLB Hold: Any Reason",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d018",
+ "EventName": "PM_CMPLU_STALL_BRU_CRU",
+ "BriefDescription": "Completion stall due to IFU",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30026",
+ "EventName": "PM_CMPLU_STALL_COQ_FULL",
+ "BriefDescription": "Completion stall due to CO q full",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30038",
+ "EventName": "PM_CMPLU_STALL_FLUSH",
+ "BriefDescription": "completion stall due to flush by own thread",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30028",
+ "EventName": "PM_CMPLU_STALL_MEM_ECC_DELAY",
+ "BriefDescription": "Completion stall due to mem ECC delay",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e01c",
+ "EventName": "PM_CMPLU_STALL_NO_NTF",
+ "BriefDescription": "Completion stall due to nop",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e01e",
+ "EventName": "PM_CMPLU_STALL_NTCG_FLUSH",
+ "BriefDescription": "Completion stall due to ntcg flush",
+ "PublicDescription": "Completion stall due to reject (load hit store)"
+ },
+ {,
+ "EventCode": "0x4c010",
+ "EventName": "PM_CMPLU_STALL_REJECT",
+ "BriefDescription": "Completion stall due to LSU reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c01a",
+ "EventName": "PM_CMPLU_STALL_REJECT_LHS",
+ "BriefDescription": "Completion stall due to reject (load hit store)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c014",
+ "EventName": "PM_CMPLU_STALL_REJ_LMQ_FULL",
+ "BriefDescription": "Completion stall due to LSU reject LMQ full",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d010",
+ "EventName": "PM_CMPLU_STALL_SCALAR",
+ "BriefDescription": "Completion stall due to VSU scalar instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d010",
+ "EventName": "PM_CMPLU_STALL_SCALAR_LONG",
+ "BriefDescription": "Completion stall due to VSU scalar long latency instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c014",
+ "EventName": "PM_CMPLU_STALL_STORE",
+ "BriefDescription": "Completion stall by stores this includes store agen finishes in pipe LS0/LS1 and store data finishes in LS2/LS3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d014",
+ "EventName": "PM_CMPLU_STALL_VECTOR",
+ "BriefDescription": "Completion stall due to VSU vector instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d012",
+ "EventName": "PM_CMPLU_STALL_VECTOR_LONG",
+ "BriefDescription": "Completion stall due to VSU vector long instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d012",
+ "EventName": "PM_CMPLU_STALL_VSU",
+ "BriefDescription": "Completion stall due to VSU instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x16083",
+ "EventName": "PM_CO0_ALLOC",
+ "BriefDescription": "CO mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0x16082",
+ "EventName": "PM_CO0_BUSY",
+ "BriefDescription": "CO mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x517082",
+ "EventName": "PM_CO_DISP_FAIL",
+ "BriefDescription": "CO dispatch failed due to all CO machines being busy",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x527084",
+ "EventName": "PM_CO_TM_SC_FOOTPRINT",
+ "BriefDescription": "L2 did a cleanifdirty CO to the L3 (ie created an SC line in the L3)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3608a",
+ "EventName": "PM_CO_USAGE",
+ "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 CO machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40066",
+ "EventName": "PM_CRU_FIN",
+ "BriefDescription": "IFU Finished a (non-branch) instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x61c050",
+ "EventName": "PM_DATA_ALL_CHIP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for either demand loads or data prefetch",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for a demand load"
+ },
+ {,
+ "EventCode": "0x64c048",
+ "EventName": "PM_DATA_ALL_FROM_DL2L3_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x63c048",
+ "EventName": "PM_DATA_ALL_FROM_DL2L3_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x63c04c",
+ "EventName": "PM_DATA_ALL_FROM_DL4",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x64c04c",
+ "EventName": "PM_DATA_ALL_FROM_DMEM",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x61c042",
+ "EventName": "PM_DATA_ALL_FROM_L2",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x64c046",
+ "EventName": "PM_DATA_ALL_FROM_L21_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x63c046",
+ "EventName": "PM_DATA_ALL_FROM_L21_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x61c04e",
+ "EventName": "PM_DATA_ALL_FROM_L2MISS_MOD",
+ "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x63c040",
+ "EventName": "PM_DATA_ALL_FROM_L2_DISP_CONFLICT_LDHITST",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 with load hit store conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x64c040",
+ "EventName": "PM_DATA_ALL_FROM_L2_DISP_CONFLICT_OTHER",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x62c040",
+ "EventName": "PM_DATA_ALL_FROM_L2_MEPF",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x61c040",
+ "EventName": "PM_DATA_ALL_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L2 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x64c042",
+ "EventName": "PM_DATA_ALL_FROM_L3",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x64c044",
+ "EventName": "PM_DATA_ALL_FROM_L31_ECO_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x63c044",
+ "EventName": "PM_DATA_ALL_FROM_L31_ECO_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x62c044",
+ "EventName": "PM_DATA_ALL_FROM_L31_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x61c046",
+ "EventName": "PM_DATA_ALL_FROM_L31_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x64c04e",
+ "EventName": "PM_DATA_ALL_FROM_L3MISS_MOD",
+ "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x63c042",
+ "EventName": "PM_DATA_ALL_FROM_L3_DISP_CONFLICT",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L3 with dispatch conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x62c042",
+ "EventName": "PM_DATA_ALL_FROM_L3_MEPF",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x61c044",
+ "EventName": "PM_DATA_ALL_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from local core's L3 without conflict due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x61c04c",
+ "EventName": "PM_DATA_ALL_FROM_LL4",
+ "BriefDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from the local chip's L4 cache due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x62c048",
+ "EventName": "PM_DATA_ALL_FROM_LMEM",
+ "BriefDescription": "The processor's data cache was reloaded from the local chip's Memory due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from the local chip's Memory due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x62c04c",
+ "EventName": "PM_DATA_ALL_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x64c04a",
+ "EventName": "PM_DATA_ALL_FROM_OFF_CHIP_CACHE",
+ "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x61c048",
+ "EventName": "PM_DATA_ALL_FROM_ON_CHIP_CACHE",
+ "BriefDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x62c046",
+ "EventName": "PM_DATA_ALL_FROM_RL2L3_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x61c04a",
+ "EventName": "PM_DATA_ALL_FROM_RL2L3_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x62c04a",
+ "EventName": "PM_DATA_ALL_FROM_RL4",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x63c04a",
+ "EventName": "PM_DATA_ALL_FROM_RMEM",
+ "BriefDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x62c050",
+ "EventName": "PM_DATA_ALL_GRP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for either demand loads or data prefetch",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for a demand load"
+ },
+ {,
+ "EventCode": "0x62c052",
+ "EventName": "PM_DATA_ALL_GRP_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for either demand loads or data prefetch",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
+ },
+ {,
+ "EventCode": "0x61c052",
+ "EventName": "PM_DATA_ALL_GRP_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for either demand loads or data prefetch",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor a demand load"
+ },
+ {,
+ "EventCode": "0x61c054",
+ "EventName": "PM_DATA_ALL_PUMP_CPRED",
+ "BriefDescription": "Pump prediction correct. Counts across all types of pumps for either demand loads or data prefetch",
+ "PublicDescription": "Pump prediction correct. Counts across all types of pumps for a demand load"
+ },
+ {,
+ "EventCode": "0x64c052",
+ "EventName": "PM_DATA_ALL_PUMP_MPRED",
+ "BriefDescription": "Pump misprediction. Counts across all types of pumps for either demand loads or data prefetch",
+ "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor a demand load"
+ },
+ {,
+ "EventCode": "0x63c050",
+ "EventName": "PM_DATA_ALL_SYS_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for either demand loads or data prefetch",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for a demand load"
+ },
+ {,
+ "EventCode": "0x63c052",
+ "EventName": "PM_DATA_ALL_SYS_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for either demand loads or data prefetch",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
+ },
+ {,
+ "EventCode": "0x64c050",
+ "EventName": "PM_DATA_ALL_SYS_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for either demand loads or data prefetch",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for a demand load"
+ },
+ {,
+ "EventCode": "0x4c046",
+ "EventName": "PM_DATA_FROM_L21_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x3c046",
+ "EventName": "PM_DATA_FROM_L21_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x4c044",
+ "EventName": "PM_DATA_FROM_L31_ECO_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x3c044",
+ "EventName": "PM_DATA_FROM_L31_ECO_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x2c044",
+ "EventName": "PM_DATA_FROM_L31_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x1c046",
+ "EventName": "PM_DATA_FROM_L31_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ },
+ {,
+ "EventCode": "0x400fe",
+ "EventName": "PM_DATA_FROM_MEM",
+ "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a demand load",
+ "PublicDescription": "Data cache reload from memory (including L4)"
+ },
+ {,
+ "EventCode": "0xe0bc",
+ "EventName": "PM_DC_COLLISIONS",
+ "BriefDescription": "DATA Cache collisions",
+ "PublicDescription": "DATA Cache collisions42"
+ },
+ {,
+ "EventCode": "0x1e050",
+ "EventName": "PM_DC_PREF_STREAM_ALLOC",
+ "BriefDescription": "Stream marked valid. The stream could have been allocated through the hardware prefetch mechanism or through software. This is combined ls0 and ls1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e050",
+ "EventName": "PM_DC_PREF_STREAM_CONF",
+ "BriefDescription": "A demand load referenced a line in an active prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software. Combine up + down",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e050",
+ "EventName": "PM_DC_PREF_STREAM_FUZZY_CONF",
+ "BriefDescription": "A demand load referenced a line in an active fuzzy prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software.Fuzzy stream confirm (out of order effects, or pf cant keep up)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e050",
+ "EventName": "PM_DC_PREF_STREAM_STRIDED_CONF",
+ "BriefDescription": "A demand load referenced a line in an active strided prefetch stream. The stream could have been allocated through the hardware prefetch mechanism or through software",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0ba",
+ "EventName": "PM_DFU",
+ "BriefDescription": "Finish DFU (all finish)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0be",
+ "EventName": "PM_DFU_DCFFIX",
+ "BriefDescription": "Convert from fixed opcode finish (dcffix,dcffixq)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0bc",
+ "EventName": "PM_DFU_DENBCD",
+ "BriefDescription": "BCD->DPD opcode finish (denbcd, denbcdq)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0b8",
+ "EventName": "PM_DFU_MC",
+ "BriefDescription": "Finish DFU multicycle",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2092",
+ "EventName": "PM_DISP_CLB_HELD_BAL",
+ "BriefDescription": "Dispatch/CLB Hold: Balance",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2094",
+ "EventName": "PM_DISP_CLB_HELD_RES",
+ "BriefDescription": "Dispatch/CLB Hold: Resource",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20a8",
+ "EventName": "PM_DISP_CLB_HELD_SB",
+ "BriefDescription": "Dispatch/CLB Hold: Scoreboard",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2098",
+ "EventName": "PM_DISP_CLB_HELD_SYNC",
+ "BriefDescription": "Dispatch/CLB Hold: Sync type instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2096",
+ "EventName": "PM_DISP_CLB_HELD_TLBIE",
+ "BriefDescription": "Dispatch Hold: Due to TLBIE",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20006",
+ "EventName": "PM_DISP_HELD_IQ_FULL",
+ "BriefDescription": "Dispatch held due to Issue q full",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1002a",
+ "EventName": "PM_DISP_HELD_MAP_FULL",
+ "BriefDescription": "Dispatch for this thread was held because the Mappers were full",
+ "PublicDescription": "Dispatch held due to Mapper full"
+ },
+ {,
+ "EventCode": "0x30018",
+ "EventName": "PM_DISP_HELD_SRQ_FULL",
+ "BriefDescription": "Dispatch held due SRQ no room",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30a6",
+ "EventName": "PM_DISP_HOLD_GCT_FULL",
+ "BriefDescription": "Dispatch Hold Due to no space in the GCT",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30008",
+ "EventName": "PM_DISP_WT",
+ "BriefDescription": "Dispatched Starved",
+ "PublicDescription": "Dispatched Starved (not held, nothing to dispatch)"
+ },
+ {,
+ "EventCode": "0x4e046",
+ "EventName": "PM_DPTEG_FROM_L21_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e046",
+ "EventName": "PM_DPTEG_FROM_L21_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e040",
+ "EventName": "PM_DPTEG_FROM_L2_DISP_CONFLICT_LDHITST",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with load hit store conflict due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e040",
+ "EventName": "PM_DPTEG_FROM_L2_DISP_CONFLICT_OTHER",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with dispatch conflict due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e044",
+ "EventName": "PM_DPTEG_FROM_L31_ECO_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e044",
+ "EventName": "PM_DPTEG_FROM_L31_ECO_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e044",
+ "EventName": "PM_DPTEG_FROM_L31_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e046",
+ "EventName": "PM_DPTEG_FROM_L31_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50a8",
+ "EventName": "PM_EAT_FORCE_MISPRED",
+ "BriefDescription": "XL-form branch was mispredicted due to the predicted target address missing from EAT. The EAT forces a mispredict in this case since there is no predicated target to validate. This is a rare case that may occur when the EAT is full and a branch is issue",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4084",
+ "EventName": "PM_EAT_FULL_CYC",
+ "BriefDescription": "Cycles No room in EAT",
+ "PublicDescription": "Cycles No room in EATSet on bank conflict and case where no ibuffers available"
+ },
+ {,
+ "EventCode": "0x2080",
+ "EventName": "PM_EE_OFF_EXT_INT",
+ "BriefDescription": "Ee off and external interrupt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20b4",
+ "EventName": "PM_FAV_TBEGIN",
+ "BriefDescription": "Dispatch time Favored tbegin",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x100f4",
+ "EventName": "PM_FLOP",
+ "BriefDescription": "Floating Point Operation Finished",
+ "PublicDescription": "Floating Point Operations Finished"
+ },
+ {,
+ "EventCode": "0xa0ae",
+ "EventName": "PM_FLOP_SUM_SCALAR",
+ "BriefDescription": "flops summary scalar instructions",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0ac",
+ "EventName": "PM_FLOP_SUM_VEC",
+ "BriefDescription": "flops summary vector instructions",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2084",
+ "EventName": "PM_FLUSH_BR_MPRED",
+ "BriefDescription": "Flush caused by branch mispredict",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2082",
+ "EventName": "PM_FLUSH_DISP",
+ "BriefDescription": "Dispatch flush",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x208c",
+ "EventName": "PM_FLUSH_DISP_SB",
+ "BriefDescription": "Dispatch Flush: Scoreboard",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2088",
+ "EventName": "PM_FLUSH_DISP_SYNC",
+ "BriefDescription": "Dispatch Flush: Sync",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x208a",
+ "EventName": "PM_FLUSH_DISP_TLBIE",
+ "BriefDescription": "Dispatch Flush: TLBIE",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x208e",
+ "EventName": "PM_FLUSH_LSU",
+ "BriefDescription": "Flush initiated by LSU",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2086",
+ "EventName": "PM_FLUSH_PARTIAL",
+ "BriefDescription": "Partial flush",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0b0",
+ "EventName": "PM_FPU0_FCONV",
+ "BriefDescription": "Convert instruction executed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0b8",
+ "EventName": "PM_FPU0_FEST",
+ "BriefDescription": "Estimate instruction executed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0b4",
+ "EventName": "PM_FPU0_FRSP",
+ "BriefDescription": "Round to single precision instruction executed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0b2",
+ "EventName": "PM_FPU1_FCONV",
+ "BriefDescription": "Convert instruction executed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0ba",
+ "EventName": "PM_FPU1_FEST",
+ "BriefDescription": "Estimate instruction executed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0b6",
+ "EventName": "PM_FPU1_FRSP",
+ "BriefDescription": "Round to single precision instruction executed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50b0",
+ "EventName": "PM_FUSION_TOC_GRP0_1",
+ "BriefDescription": "One pair of instructions fused with TOC in Group0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50ae",
+ "EventName": "PM_FUSION_TOC_GRP0_2",
+ "BriefDescription": "Two pairs of instructions fused with TOCin Group0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50ac",
+ "EventName": "PM_FUSION_TOC_GRP0_3",
+ "BriefDescription": "Three pairs of instructions fused with TOC in Group0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50b2",
+ "EventName": "PM_FUSION_TOC_GRP1_1",
+ "BriefDescription": "One pair of instructions fused with TOX in Group1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50b8",
+ "EventName": "PM_FUSION_VSX_GRP0_1",
+ "BriefDescription": "One pair of instructions fused with VSX in Group0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50b6",
+ "EventName": "PM_FUSION_VSX_GRP0_2",
+ "BriefDescription": "Two pairs of instructions fused with VSX in Group0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50b4",
+ "EventName": "PM_FUSION_VSX_GRP0_3",
+ "BriefDescription": "Three pairs of instructions fused with VSX in Group0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50ba",
+ "EventName": "PM_FUSION_VSX_GRP1_1",
+ "BriefDescription": "One pair of instructions fused with VSX in Group1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3000e",
+ "EventName": "PM_FXU0_BUSY_FXU1_IDLE",
+ "BriefDescription": "fxu0 busy and fxu1 idle",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10004",
+ "EventName": "PM_FXU0_FIN",
+ "BriefDescription": "The fixed point unit Unit 0 finished an instruction. Instructions that finish may not necessary complete",
+ "PublicDescription": "FXU0 Finished"
+ },
+ {,
+ "EventCode": "0x4000e",
+ "EventName": "PM_FXU1_BUSY_FXU0_IDLE",
+ "BriefDescription": "fxu0 idle and fxu1 busy",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40004",
+ "EventName": "PM_FXU1_FIN",
+ "BriefDescription": "FXU1 Finished",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20008",
+ "EventName": "PM_GCT_EMPTY_CYC",
+ "BriefDescription": "No itags assigned either thread (GCT Empty)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30a4",
+ "EventName": "PM_GCT_MERGE",
+ "BriefDescription": "Group dispatched on a merged GCT empty. GCT entries can be merged only within the same thread",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d01e",
+ "EventName": "PM_GCT_NOSLOT_BR_MPRED",
+ "BriefDescription": "Gct empty for this thread due to branch mispred",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d01a",
+ "EventName": "PM_GCT_NOSLOT_BR_MPRED_ICMISS",
+ "BriefDescription": "Gct empty for this thread due to Icache Miss and branch mispred",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x100f8",
+ "EventName": "PM_GCT_NOSLOT_CYC",
+ "BriefDescription": "No itags assigned",
+ "PublicDescription": "Pipeline empty (No itags assigned , no GCT slots used)"
+ },
+ {,
+ "EventCode": "0x2d01e",
+ "EventName": "PM_GCT_NOSLOT_DISP_HELD_ISSQ",
+ "BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to Issue q full",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d01c",
+ "EventName": "PM_GCT_NOSLOT_DISP_HELD_MAP",
+ "BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to Mapper full",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e010",
+ "EventName": "PM_GCT_NOSLOT_DISP_HELD_OTHER",
+ "BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to sync",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d01c",
+ "EventName": "PM_GCT_NOSLOT_DISP_HELD_SRQ",
+ "BriefDescription": "Gct empty for this thread due to dispatch hold on this thread due to SRQ full",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e010",
+ "EventName": "PM_GCT_NOSLOT_IC_L3MISS",
+ "BriefDescription": "Gct empty for this thread due to icach l3 miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d01a",
+ "EventName": "PM_GCT_NOSLOT_IC_MISS",
+ "BriefDescription": "Gct empty for this thread due to Icache Miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20a2",
+ "EventName": "PM_GCT_UTIL_11_14_ENTRIES",
+ "BriefDescription": "GCT Utilization 11-14 entries",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20a4",
+ "EventName": "PM_GCT_UTIL_15_17_ENTRIES",
+ "BriefDescription": "GCT Utilization 15-17 entries",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20a6",
+ "EventName": "PM_GCT_UTIL_18_ENTRIES",
+ "BriefDescription": "GCT Utilization 18+ entries",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x209c",
+ "EventName": "PM_GCT_UTIL_1_2_ENTRIES",
+ "BriefDescription": "GCT Utilization 1-2 entries",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x209e",
+ "EventName": "PM_GCT_UTIL_3_6_ENTRIES",
+ "BriefDescription": "GCT Utilization 3-6 entries",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20a0",
+ "EventName": "PM_GCT_UTIL_7_10_ENTRIES",
+ "BriefDescription": "GCT Utilization 7-10 entries",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1000a",
+ "EventName": "PM_GRP_BR_MPRED_NONSPEC",
+ "BriefDescription": "Group experienced non-speculative branch redirect",
+ "PublicDescription": "Group experienced Non-speculative br mispredicct"
+ },
+ {,
+ "EventCode": "0x30004",
+ "EventName": "PM_GRP_CMPL",
+ "BriefDescription": "group completed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3000a",
+ "EventName": "PM_GRP_DISP",
+ "BriefDescription": "group dispatch",
+ "PublicDescription": "dispatch_success (Group Dispatched)"
+ },
+ {,
+ "EventCode": "0x1000c",
+ "EventName": "PM_GRP_IC_MISS_NONSPEC",
+ "BriefDescription": "Group experienced non-speculative I cache miss",
+ "PublicDescription": "Group experi enced Non-specu lative I cache miss"
+ },
+ {,
+ "EventCode": "0x10130",
+ "EventName": "PM_GRP_MRK",
+ "BriefDescription": "Instruction Marked",
+ "PublicDescription": "Instruction marked in idu"
+ },
+ {,
+ "EventCode": "0x509c",
+ "EventName": "PM_GRP_NON_FULL_GROUP",
+ "BriefDescription": "GROUPs where we did not have 6 non branch instructions in the group(ST mode), in SMT mode 3 non branches",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50a4",
+ "EventName": "PM_GRP_TERM_2ND_BRANCH",
+ "BriefDescription": "There were enough instructions in the Ibuffer, but 2nd branch ends group",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50a6",
+ "EventName": "PM_GRP_TERM_FPU_AFTER_BR",
+ "BriefDescription": "There were enough instructions in the Ibuffer, but FPU OP IN same group after a branch terminates a group, cant do partial flushes",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x509e",
+ "EventName": "PM_GRP_TERM_NOINST",
+ "BriefDescription": "Do not fill every slot in the group, Not enough instructions in the Ibuffer. This includes cases where the group started with enough instructions, but some got knocked out by a cache miss or branch redirect (which would also empty the Ibuffer)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50a0",
+ "EventName": "PM_GRP_TERM_OTHER",
+ "BriefDescription": "There were enough instructions in the Ibuffer, but the group terminated early for some other reason, most likely due to a First or Last",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x50a2",
+ "EventName": "PM_GRP_TERM_SLOT_LIMIT",
+ "BriefDescription": "There were enough instructions in the Ibuffer, but 3 src RA/RB/RC , 2 way crack caused a group termination",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4086",
+ "EventName": "PM_IBUF_FULL_CYC",
+ "BriefDescription": "Cycles No room in ibuff",
+ "PublicDescription": "Cycles No room in ibufffully qualified transfer (if5 valid)"
+ },
+ {,
+ "EventCode": "0x4098",
+ "EventName": "PM_IC_DEMAND_L2_BHT_REDIRECT",
+ "BriefDescription": "L2 I cache demand request due to BHT redirect, branch redirect ( 2 bubbles 3 cycles)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x409a",
+ "EventName": "PM_IC_DEMAND_L2_BR_REDIRECT",
+ "BriefDescription": "L2 I cache demand request due to branch Mispredict ( 15 cycle path)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4088",
+ "EventName": "PM_IC_DEMAND_REQ",
+ "BriefDescription": "Demand Instruction fetch request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x508a",
+ "EventName": "PM_IC_INVALIDATE",
+ "BriefDescription": "Ic line invalidated",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4092",
+ "EventName": "PM_IC_PREF_CANCEL_HIT",
+ "BriefDescription": "Prefetch Canceled due to icache hit",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4094",
+ "EventName": "PM_IC_PREF_CANCEL_L2",
+ "BriefDescription": "L2 Squashed request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4090",
+ "EventName": "PM_IC_PREF_CANCEL_PAGE",
+ "BriefDescription": "Prefetch Canceled due to page boundary",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x408a",
+ "EventName": "PM_IC_PREF_REQ",
+ "BriefDescription": "Instruction prefetch requests",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x408e",
+ "EventName": "PM_IC_PREF_WRITE",
+ "BriefDescription": "Instruction prefetch written into IL1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4096",
+ "EventName": "PM_IC_RELOAD_PRIVATE",
+ "BriefDescription": "Reloading line was brought in private for a specific thread. Most lines are brought in shared for all eight thrreads. If RA does not match then invalidates and then brings it shared to other thread. In P7 line brought in private , then line was invalidat",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x5088",
+ "EventName": "PM_IFU_L2_TOUCH",
+ "BriefDescription": "L2 touch to update MRU on a line",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x514050",
+ "EventName": "PM_INST_ALL_CHIP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was chip pump (prediction=correct) for instruction fetches and prefetches",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was chip pump (prediction=correct) for an instruction fetch"
+ },
+ {,
+ "EventCode": "0x544048",
+ "EventName": "PM_INST_ALL_FROM_DL2L3_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x534048",
+ "EventName": "PM_INST_ALL_FROM_DL2L3_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x53404c",
+ "EventName": "PM_INST_ALL_FROM_DL4",
+ "BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on a different Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x54404c",
+ "EventName": "PM_INST_ALL_FROM_DMEM",
+ "BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group (Distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x514042",
+ "EventName": "PM_INST_ALL_FROM_L2",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x544046",
+ "EventName": "PM_INST_ALL_FROM_L21_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x534046",
+ "EventName": "PM_INST_ALL_FROM_L21_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x51404e",
+ "EventName": "PM_INST_ALL_FROM_L2MISS",
+ "BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x534040",
+ "EventName": "PM_INST_ALL_FROM_L2_DISP_CONFLICT_LDHITST",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with load hit store conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x544040",
+ "EventName": "PM_INST_ALL_FROM_L2_DISP_CONFLICT_OTHER",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x524040",
+ "EventName": "PM_INST_ALL_FROM_L2_MEPF",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 hit without dispatch conflicts on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x514040",
+ "EventName": "PM_INST_ALL_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L2 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x544042",
+ "EventName": "PM_INST_ALL_FROM_L3",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x544044",
+ "EventName": "PM_INST_ALL_FROM_L31_ECO_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x534044",
+ "EventName": "PM_INST_ALL_FROM_L31_ECO_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x524044",
+ "EventName": "PM_INST_ALL_FROM_L31_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x514046",
+ "EventName": "PM_INST_ALL_FROM_L31_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x54404e",
+ "EventName": "PM_INST_ALL_FROM_L3MISS_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to a instruction fetch",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x534042",
+ "EventName": "PM_INST_ALL_FROM_L3_DISP_CONFLICT",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 with dispatch conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x524042",
+ "EventName": "PM_INST_ALL_FROM_L3_MEPF",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without dispatch conflicts hit on Mepf state. due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x514044",
+ "EventName": "PM_INST_ALL_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from local core's L3 without conflict due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x51404c",
+ "EventName": "PM_INST_ALL_FROM_LL4",
+ "BriefDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from the local chip's L4 cache due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x524048",
+ "EventName": "PM_INST_ALL_FROM_LMEM",
+ "BriefDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from the local chip's Memory due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x52404c",
+ "EventName": "PM_INST_ALL_FROM_MEMORY",
+ "BriefDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a memory location including L4 from local remote or distant due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x54404a",
+ "EventName": "PM_INST_ALL_FROM_OFF_CHIP_CACHE",
+ "BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x514048",
+ "EventName": "PM_INST_ALL_FROM_ON_CHIP_CACHE",
+ "BriefDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded either shared or modified data from another core's L2/L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x524046",
+ "EventName": "PM_INST_ALL_FROM_RL2L3_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x51404a",
+ "EventName": "PM_INST_ALL_FROM_RL2L3_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x52404a",
+ "EventName": "PM_INST_ALL_FROM_RL4",
+ "BriefDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from another chip's L4 on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x53404a",
+ "EventName": "PM_INST_ALL_FROM_RMEM",
+ "BriefDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from another chip's memory on the same Node or Group ( Remote) due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x524050",
+ "EventName": "PM_INST_ALL_GRP_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was group pump (prediction=correct) for instruction fetches and prefetches",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was group pump for an instruction fetch"
+ },
+ {,
+ "EventCode": "0x524052",
+ "EventName": "PM_INST_ALL_GRP_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (Group) ended up either larger or smaller than Initial Pump Scope for instruction fetches and prefetches",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope OR Final Pump Scope(Group) got data from source that was at smaller scope(Chip) Final pump was group pump and initial pump was chip or final and initial pump was gro"
+ },
+ {,
+ "EventCode": "0x514052",
+ "EventName": "PM_INST_ALL_GRP_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (Group) ended up larger than Initial Pump Scope (Chip) for instruction fetches and prefetches",
+ "PublicDescription": "Final Pump Scope(Group) to get data sourced, ended up larger than Initial Pump Scope (Chip) Final pump was group pump and initial pump was chip pumpfor an instruction fetch"
+ },
+ {,
+ "EventCode": "0x514054",
+ "EventName": "PM_INST_ALL_PUMP_CPRED",
+ "BriefDescription": "Pump prediction correct. Counts across all types of pumps for instruction fetches and prefetches",
+ "PublicDescription": "Pump prediction correct. Counts across all types of pumpsfor an instruction fetch"
+ },
+ {,
+ "EventCode": "0x544052",
+ "EventName": "PM_INST_ALL_PUMP_MPRED",
+ "BriefDescription": "Pump misprediction. Counts across all types of pumps for instruction fetches and prefetches",
+ "PublicDescription": "Pump Mis prediction Counts across all types of pumpsfor an instruction fetch"
+ },
+ {,
+ "EventCode": "0x534050",
+ "EventName": "PM_INST_ALL_SYS_PUMP_CPRED",
+ "BriefDescription": "Initial and Final Pump Scope was system pump (prediction=correct) for instruction fetches and prefetches",
+ "PublicDescription": "Initial and Final Pump Scope and data sourced across this scope was system pump for an instruction fetch"
+ },
+ {,
+ "EventCode": "0x534052",
+ "EventName": "PM_INST_ALL_SYS_PUMP_MPRED",
+ "BriefDescription": "Final Pump Scope (system) mispredicted. Either the original scope was too small (Chip/Group) or the original scope was System and it should have been smaller. Counts for instruction fetches and prefetches",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope(Chip/Group) OR Final Pump Scope(system) got data from source that was at smaller scope(Chip/group) Final pump was system pump and initial pump was chip or group or"
+ },
+ {,
+ "EventCode": "0x544050",
+ "EventName": "PM_INST_ALL_SYS_PUMP_MPRED_RTY",
+ "BriefDescription": "Final Pump Scope (system) ended up larger than Initial Pump Scope (Chip/Group) for instruction fetches and prefetches",
+ "PublicDescription": "Final Pump Scope(system) to get data sourced, ended up larger than Initial Pump Scope (Chip or Group) for an instruction fetch"
+ },
+ {,
+ "EventCode": "0x4080",
+ "EventName": "PM_INST_FROM_L1",
+ "BriefDescription": "Instruction fetches from L1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x44046",
+ "EventName": "PM_INST_FROM_L21_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x34046",
+ "EventName": "PM_INST_FROM_L21_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L2 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x44044",
+ "EventName": "PM_INST_FROM_L31_ECO_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x34044",
+ "EventName": "PM_INST_FROM_L31_ECO_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x24044",
+ "EventName": "PM_INST_FROM_L31_MOD",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Modified (M) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x14046",
+ "EventName": "PM_INST_FROM_L31_SHR",
+ "BriefDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded with Shared (S) data from another core's L3 on the same chip due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ },
+ {,
+ "EventCode": "0x30016",
+ "EventName": "PM_INST_IMC_MATCH_DISP",
+ "BriefDescription": "Matched Instructions Dispatched",
+ "PublicDescription": "IMC Matches dispatched"
+ },
+ {,
+ "EventCode": "0x30014",
+ "EventName": "PM_IOPS_DISP",
+ "BriefDescription": "Internal Operations dispatched",
+ "PublicDescription": "IOPS dispatched"
+ },
+ {,
+ "EventCode": "0x45046",
+ "EventName": "PM_IPTEG_FROM_L21_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x35046",
+ "EventName": "PM_IPTEG_FROM_L21_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x35040",
+ "EventName": "PM_IPTEG_FROM_L2_DISP_CONFLICT_LDHITST",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with load hit store conflict due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x45040",
+ "EventName": "PM_IPTEG_FROM_L2_DISP_CONFLICT_OTHER",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with dispatch conflict due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x45044",
+ "EventName": "PM_IPTEG_FROM_L31_ECO_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x35044",
+ "EventName": "PM_IPTEG_FROM_L31_ECO_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x25044",
+ "EventName": "PM_IPTEG_FROM_L31_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x15046",
+ "EventName": "PM_IPTEG_FROM_L31_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a instruction side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x617082",
+ "EventName": "PM_ISIDE_DISP",
+ "BriefDescription": "All i-side dispatch attempts",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x627084",
+ "EventName": "PM_ISIDE_DISP_FAIL",
+ "BriefDescription": "All i-side dispatch attempts that failed due to a addr collision with another machine",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x627086",
+ "EventName": "PM_ISIDE_DISP_FAIL_OTHER",
+ "BriefDescription": "All i-side dispatch attempts that failed due to a reason other than addrs collision",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4608e",
+ "EventName": "PM_ISIDE_L2MEMACC",
+ "BriefDescription": "valid when first beat of data comes in for an i-side fetch where data came from mem(or L4)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x44608e",
+ "EventName": "PM_ISIDE_MRU_TOUCH",
+ "BriefDescription": "Iside L2 MRU touch",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30ac",
+ "EventName": "PM_ISU_REF_FX0",
+ "BriefDescription": "FX0 ISU reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30ae",
+ "EventName": "PM_ISU_REF_FX1",
+ "BriefDescription": "FX1 ISU reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x38ac",
+ "EventName": "PM_ISU_REF_FXU",
+ "BriefDescription": "FXU ISU reject from either pipe",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30b0",
+ "EventName": "PM_ISU_REF_LS0",
+ "BriefDescription": "LS0 ISU reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30b2",
+ "EventName": "PM_ISU_REF_LS1",
+ "BriefDescription": "LS1 ISU reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30b4",
+ "EventName": "PM_ISU_REF_LS2",
+ "BriefDescription": "LS2 ISU reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30b6",
+ "EventName": "PM_ISU_REF_LS3",
+ "BriefDescription": "LS3 ISU reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x309c",
+ "EventName": "PM_ISU_REJECTS_ALL",
+ "BriefDescription": "All isu rejects could be more than 1 per cycle",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30a2",
+ "EventName": "PM_ISU_REJECT_RES_NA",
+ "BriefDescription": "ISU reject due to resource not available",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x309e",
+ "EventName": "PM_ISU_REJECT_SAR_BYPASS",
+ "BriefDescription": "Reject because of SAR bypass",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30a0",
+ "EventName": "PM_ISU_REJECT_SRC_NA",
+ "BriefDescription": "ISU reject due to source not available",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30a8",
+ "EventName": "PM_ISU_REJ_VS0",
+ "BriefDescription": "VS0 ISU reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30aa",
+ "EventName": "PM_ISU_REJ_VS1",
+ "BriefDescription": "VS1 ISU reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x38a8",
+ "EventName": "PM_ISU_REJ_VSU",
+ "BriefDescription": "VSU ISU reject from either pipe",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30b8",
+ "EventName": "PM_ISYNC",
+ "BriefDescription": "Isync count per thread",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x200301ea",
+ "EventName": "PM_L1MISS_LAT_EXC_1024",
+ "BriefDescription": "L1 misses that took longer than 1024 cyles to resolve (miss to reload)",
+ "PublicDescription": "Reload latency exceeded 1024 cyc"
+ },
+ {,
+ "EventCode": "0x200401ec",
+ "EventName": "PM_L1MISS_LAT_EXC_2048",
+ "BriefDescription": "L1 misses that took longer than 2048 cyles to resolve (miss to reload)",
+ "PublicDescription": "Reload latency exceeded 2048 cyc"
+ },
+ {,
+ "EventCode": "0x200101e8",
+ "EventName": "PM_L1MISS_LAT_EXC_256",
+ "BriefDescription": "L1 misses that took longer than 256 cyles to resolve (miss to reload)",
+ "PublicDescription": "Reload latency exceeded 256 cyc"
+ },
+ {,
+ "EventCode": "0x200201e6",
+ "EventName": "PM_L1MISS_LAT_EXC_32",
+ "BriefDescription": "L1 misses that took longer than 32 cyles to resolve (miss to reload)",
+ "PublicDescription": "Reload latency exceeded 32 cyc"
+ },
+ {,
+ "EventCode": "0x26086",
+ "EventName": "PM_L1PF_L2MEMACC",
+ "BriefDescription": "valid when first beat of data comes in for an L1pref where data came from mem(or L4)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x408c",
+ "EventName": "PM_L1_DEMAND_WRITE",
+ "BriefDescription": "Instruction Demand sectors wriittent into IL1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x417080",
+ "EventName": "PM_L2_CASTOUT_MOD",
+ "BriefDescription": "L2 Castouts - Modified (M, Mu, Me)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x417082",
+ "EventName": "PM_L2_CASTOUT_SHR",
+ "BriefDescription": "L2 Castouts - Shared (T, Te, Si, S)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x27084",
+ "EventName": "PM_L2_CHIP_PUMP",
+ "BriefDescription": "RC requests that were local on chip pump attempts",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x427086",
+ "EventName": "PM_L2_DC_INV",
+ "BriefDescription": "Dcache invalidates from L2",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x44608c",
+ "EventName": "PM_L2_DISP_ALL_L2MISS",
+ "BriefDescription": "All successful Ld/St dispatches for this thread that were an L2miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x27086",
+ "EventName": "PM_L2_GROUP_PUMP",
+ "BriefDescription": "RC requests that were on Node Pump attempts",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x626084",
+ "EventName": "PM_L2_GRP_GUESS_CORRECT",
+ "BriefDescription": "L2 guess grp and guess was correct (data intra-6chip AND ^on-chip)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x626086",
+ "EventName": "PM_L2_GRP_GUESS_WRONG",
+ "BriefDescription": "L2 guess grp and guess was not correct (ie data on-chip OR beyond-6chip)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x427084",
+ "EventName": "PM_L2_IC_INV",
+ "BriefDescription": "Icache Invalidates from L2",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x436088",
+ "EventName": "PM_L2_INST",
+ "BriefDescription": "All successful I-side dispatches for this thread (excludes i_l2mru_tch reqs)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x43608a",
+ "EventName": "PM_L2_INST_MISS",
+ "BriefDescription": "All successful i-side dispatches that were an L2miss for this thread (excludes i_l2mru_tch reqs)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x416080",
+ "EventName": "PM_L2_LD",
+ "BriefDescription": "All successful D-side Load dispatches for this thread",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x437088",
+ "EventName": "PM_L2_LD_DISP",
+ "BriefDescription": "All successful load dispatches",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x43708a",
+ "EventName": "PM_L2_LD_HIT",
+ "BriefDescription": "All successful load dispatches that were L2 hits",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x426084",
+ "EventName": "PM_L2_LD_MISS",
+ "BriefDescription": "All successful D-Side Load dispatches that were an L2miss for this thread",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x616080",
+ "EventName": "PM_L2_LOC_GUESS_CORRECT",
+ "BriefDescription": "L2 guess loc and guess was correct (ie data local)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x616082",
+ "EventName": "PM_L2_LOC_GUESS_WRONG",
+ "BriefDescription": "L2 guess loc and guess was not correct (ie data not on chip)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x516080",
+ "EventName": "PM_L2_RCLD_DISP",
+ "BriefDescription": "L2 RC load dispatch attempt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x516082",
+ "EventName": "PM_L2_RCLD_DISP_FAIL_ADDR",
+ "BriefDescription": "L2 RC load dispatch attempt failed due to address collision with RC/CO/SN/SQ",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x526084",
+ "EventName": "PM_L2_RCLD_DISP_FAIL_OTHER",
+ "BriefDescription": "L2 RC load dispatch attempt failed due to other reasons",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x536088",
+ "EventName": "PM_L2_RCST_DISP",
+ "BriefDescription": "L2 RC store dispatch attempt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x53608a",
+ "EventName": "PM_L2_RCST_DISP_FAIL_ADDR",
+ "BriefDescription": "L2 RC store dispatch attempt failed due to address collision with RC/CO/SN/SQ",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x54608c",
+ "EventName": "PM_L2_RCST_DISP_FAIL_OTHER",
+ "BriefDescription": "L2 RC store dispatch attempt failed due to other reasons",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x537088",
+ "EventName": "PM_L2_RC_ST_DONE",
+ "BriefDescription": "RC did st to line that was Tx or Sx",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x63708a",
+ "EventName": "PM_L2_RTY_LD",
+ "BriefDescription": "RC retries on PB for any load from core",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3708a",
+ "EventName": "PM_L2_RTY_ST",
+ "BriefDescription": "RC retries on PB for any store from core",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x54708c",
+ "EventName": "PM_L2_SN_M_RD_DONE",
+ "BriefDescription": "SNP dispatched for a read and was M",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x54708e",
+ "EventName": "PM_L2_SN_M_WR_DONE",
+ "BriefDescription": "SNP dispatched for a write and was M",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x53708a",
+ "EventName": "PM_L2_SN_SX_I_DONE",
+ "BriefDescription": "SNP dispatched and went from Sx or Tx to Ix",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x17080",
+ "EventName": "PM_L2_ST",
+ "BriefDescription": "All successful D-side store dispatches for this thread",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x44708c",
+ "EventName": "PM_L2_ST_DISP",
+ "BriefDescription": "All successful store dispatches",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x44708e",
+ "EventName": "PM_L2_ST_HIT",
+ "BriefDescription": "All successful store dispatches that were L2Hits",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x17082",
+ "EventName": "PM_L2_ST_MISS",
+ "BriefDescription": "All successful D-side store dispatches for this thread that were L2 Miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x636088",
+ "EventName": "PM_L2_SYS_GUESS_CORRECT",
+ "BriefDescription": "L2 guess sys and guess was correct (ie data beyond-6chip)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x63608a",
+ "EventName": "PM_L2_SYS_GUESS_WRONG",
+ "BriefDescription": "L2 guess sys and guess was not correct (ie data ^beyond-6chip)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x617080",
+ "EventName": "PM_L2_SYS_PUMP",
+ "BriefDescription": "RC requests that were system pump attempts",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e05e",
+ "EventName": "PM_L2_TM_REQ_ABORT",
+ "BriefDescription": "TM abort",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e05c",
+ "EventName": "PM_L2_TM_ST_ABORT_SISTER",
+ "BriefDescription": "TM marked store abort",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x23808a",
+ "EventName": "PM_L3_CINJ",
+ "BriefDescription": "l3 ci of cache inject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x128084",
+ "EventName": "PM_L3_CI_HIT",
+ "BriefDescription": "L3 Castins Hit (total count",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x128086",
+ "EventName": "PM_L3_CI_MISS",
+ "BriefDescription": "L3 castins miss (total count",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x819082",
+ "EventName": "PM_L3_CI_USAGE",
+ "BriefDescription": "rotating sample of 16 CI or CO actives",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x438088",
+ "EventName": "PM_L3_CO",
+ "BriefDescription": "l3 castout occurring ( does not include casthrough or log writes (cinj/dmaw)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x83908b",
+ "EventName": "PM_L3_CO0_ALLOC",
+ "BriefDescription": "lifetime, sample of CO machine 0 valid",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0x83908a",
+ "EventName": "PM_L3_CO0_BUSY",
+ "BriefDescription": "lifetime, sample of CO machine 0 valid",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x28086",
+ "EventName": "PM_L3_CO_L31",
+ "BriefDescription": "L3 CO to L3.1 OR of port 0 and 1 ( lossy)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x238088",
+ "EventName": "PM_L3_CO_LCO",
+ "BriefDescription": "Total L3 castouts occurred on LCO",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x28084",
+ "EventName": "PM_L3_CO_MEM",
+ "BriefDescription": "L3 CO to memory OR of port 0 and 1 ( lossy)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb19082",
+ "EventName": "PM_L3_GRP_GUESS_CORRECT",
+ "BriefDescription": "Initial scope=group and data from same group (near) (pred successful)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb3908a",
+ "EventName": "PM_L3_GRP_GUESS_WRONG_HIGH",
+ "BriefDescription": "Initial scope=group but data from local node. Predition too high",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb39088",
+ "EventName": "PM_L3_GRP_GUESS_WRONG_LOW",
+ "BriefDescription": "Initial scope=group but data from outside group (far or rem). Prediction too Low",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x218080",
+ "EventName": "PM_L3_HIT",
+ "BriefDescription": "L3 Hits",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x138088",
+ "EventName": "PM_L3_L2_CO_HIT",
+ "BriefDescription": "L2 castout hits",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x13808a",
+ "EventName": "PM_L3_L2_CO_MISS",
+ "BriefDescription": "L2 castout miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x14808c",
+ "EventName": "PM_L3_LAT_CI_HIT",
+ "BriefDescription": "L3 Lateral Castins Hit",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x14808e",
+ "EventName": "PM_L3_LAT_CI_MISS",
+ "BriefDescription": "L3 Lateral Castins Miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x228084",
+ "EventName": "PM_L3_LD_HIT",
+ "BriefDescription": "L3 demand LD Hits",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x228086",
+ "EventName": "PM_L3_LD_MISS",
+ "BriefDescription": "L3 demand LD Miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e052",
+ "EventName": "PM_L3_LD_PREF",
+ "BriefDescription": "L3 Load Prefetches",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb19080",
+ "EventName": "PM_L3_LOC_GUESS_CORRECT",
+ "BriefDescription": "initial scope=node/chip and data from local node (local) (pred successful)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb29086",
+ "EventName": "PM_L3_LOC_GUESS_WRONG",
+ "BriefDescription": "Initial scope=node but data from out side local node (near or far or rem). Prediction too Low",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x218082",
+ "EventName": "PM_L3_MISS",
+ "BriefDescription": "L3 Misses",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x54808c",
+ "EventName": "PM_L3_P0_CO_L31",
+ "BriefDescription": "l3 CO to L3.1 (lco) port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x538088",
+ "EventName": "PM_L3_P0_CO_MEM",
+ "BriefDescription": "l3 CO to memory port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x929084",
+ "EventName": "PM_L3_P0_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa29084",
+ "EventName": "PM_L3_P0_GRP_PUMP",
+ "BriefDescription": "L3 pf sent with grp scope port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x528084",
+ "EventName": "PM_L3_P0_LCO_DATA",
+ "BriefDescription": "lco sent with data port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x518080",
+ "EventName": "PM_L3_P0_LCO_NO_DATA",
+ "BriefDescription": "dataless l3 lco sent port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa4908c",
+ "EventName": "PM_L3_P0_LCO_RTY",
+ "BriefDescription": "L3 LCO received retry port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa19080",
+ "EventName": "PM_L3_P0_NODE_PUMP",
+ "BriefDescription": "L3 pf sent with nodal scope port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x919080",
+ "EventName": "PM_L3_P0_PF_RTY",
+ "BriefDescription": "L3 PF received retry port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x939088",
+ "EventName": "PM_L3_P0_SN_HIT",
+ "BriefDescription": "L3 snoop hit port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x118080",
+ "EventName": "PM_L3_P0_SN_INV",
+ "BriefDescription": "Port0 snooper detects someone doing a store to a line thats Sx",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x94908c",
+ "EventName": "PM_L3_P0_SN_MISS",
+ "BriefDescription": "L3 snoop miss port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa39088",
+ "EventName": "PM_L3_P0_SYS_PUMP",
+ "BriefDescription": "L3 pf sent with sys scope port 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x54808e",
+ "EventName": "PM_L3_P1_CO_L31",
+ "BriefDescription": "l3 CO to L3.1 (lco) port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x53808a",
+ "EventName": "PM_L3_P1_CO_MEM",
+ "BriefDescription": "l3 CO to memory port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x929086",
+ "EventName": "PM_L3_P1_CO_RTY",
+ "BriefDescription": "L3 CO received retry port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa29086",
+ "EventName": "PM_L3_P1_GRP_PUMP",
+ "BriefDescription": "L3 pf sent with grp scope port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x528086",
+ "EventName": "PM_L3_P1_LCO_DATA",
+ "BriefDescription": "lco sent with data port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x518082",
+ "EventName": "PM_L3_P1_LCO_NO_DATA",
+ "BriefDescription": "dataless l3 lco sent port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa4908e",
+ "EventName": "PM_L3_P1_LCO_RTY",
+ "BriefDescription": "L3 LCO received retry port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa19082",
+ "EventName": "PM_L3_P1_NODE_PUMP",
+ "BriefDescription": "L3 pf sent with nodal scope port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x919082",
+ "EventName": "PM_L3_P1_PF_RTY",
+ "BriefDescription": "L3 PF received retry port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x93908a",
+ "EventName": "PM_L3_P1_SN_HIT",
+ "BriefDescription": "L3 snoop hit port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x118082",
+ "EventName": "PM_L3_P1_SN_INV",
+ "BriefDescription": "Port1 snooper detects someone doing a store to a line thats Sx",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x94908e",
+ "EventName": "PM_L3_P1_SN_MISS",
+ "BriefDescription": "L3 snoop miss port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa3908a",
+ "EventName": "PM_L3_P1_SYS_PUMP",
+ "BriefDescription": "L3 pf sent with sys scope port 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x84908d",
+ "EventName": "PM_L3_PF0_ALLOC",
+ "BriefDescription": "lifetime, sample of PF machine 0 valid",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0x84908c",
+ "EventName": "PM_L3_PF0_BUSY",
+ "BriefDescription": "lifetime, sample of PF machine 0 valid",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x428084",
+ "EventName": "PM_L3_PF_HIT_L3",
+ "BriefDescription": "l3 pf hit in l3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x18080",
+ "EventName": "PM_L3_PF_MISS_L3",
+ "BriefDescription": "L3 Prefetch missed in L3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3808a",
+ "EventName": "PM_L3_PF_OFF_CHIP_CACHE",
+ "BriefDescription": "L3 Prefetch from Off chip cache",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4808e",
+ "EventName": "PM_L3_PF_OFF_CHIP_MEM",
+ "BriefDescription": "L3 Prefetch from Off chip memory",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x38088",
+ "EventName": "PM_L3_PF_ON_CHIP_CACHE",
+ "BriefDescription": "L3 Prefetch from On chip cache",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4808c",
+ "EventName": "PM_L3_PF_ON_CHIP_MEM",
+ "BriefDescription": "L3 Prefetch from On chip memory",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x829084",
+ "EventName": "PM_L3_PF_USAGE",
+ "BriefDescription": "rotating sample of 32 PF actives",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e052",
+ "EventName": "PM_L3_PREF_ALL",
+ "BriefDescription": "Total HW L3 prefetches(Load+store)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x84908f",
+ "EventName": "PM_L3_RD0_ALLOC",
+ "BriefDescription": "lifetime, sample of RD machine 0 valid",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0x84908e",
+ "EventName": "PM_L3_RD0_BUSY",
+ "BriefDescription": "lifetime, sample of RD machine 0 valid",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x829086",
+ "EventName": "PM_L3_RD_USAGE",
+ "BriefDescription": "rotating sample of 16 RD actives",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x839089",
+ "EventName": "PM_L3_SN0_ALLOC",
+ "BriefDescription": "lifetime, sample of snooper machine 0 valid",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0x839088",
+ "EventName": "PM_L3_SN0_BUSY",
+ "BriefDescription": "lifetime, sample of snooper machine 0 valid",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x819080",
+ "EventName": "PM_L3_SN_USAGE",
+ "BriefDescription": "rotating sample of 8 snoop valids",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e052",
+ "EventName": "PM_L3_ST_PREF",
+ "BriefDescription": "L3 store Prefetches",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e052",
+ "EventName": "PM_L3_SW_PREF",
+ "BriefDescription": "Data stream touchto L3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb29084",
+ "EventName": "PM_L3_SYS_GUESS_CORRECT",
+ "BriefDescription": "Initial scope=system and data from outside group (far or rem)(pred successful)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb4908c",
+ "EventName": "PM_L3_SYS_GUESS_WRONG",
+ "BriefDescription": "Initial scope=system but data from local or near. Predction too high",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x24808e",
+ "EventName": "PM_L3_TRANS_PF",
+ "BriefDescription": "L3 Transient prefetch",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x18081",
+ "EventName": "PM_L3_WI0_ALLOC",
+ "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0x418080",
+ "EventName": "PM_L3_WI0_BUSY",
+ "BriefDescription": "lifetime, sample of Write Inject machine 0 valid",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x418082",
+ "EventName": "PM_L3_WI_USAGE",
+ "BriefDescription": "rotating sample of 8 WI actives",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xc080",
+ "EventName": "PM_LD_REF_L1_LSU0",
+ "BriefDescription": "LS0 L1 D cache load references counted at finish, gated by reject",
+ "PublicDescription": "LS0 L1 D cache load references counted at finish, gated by rejectLSU0 L1 D cache load references"
+ },
+ {,
+ "EventCode": "0xc082",
+ "EventName": "PM_LD_REF_L1_LSU1",
+ "BriefDescription": "LS1 L1 D cache load references counted at finish, gated by reject",
+ "PublicDescription": "LS1 L1 D cache load references counted at finish, gated by rejectLSU1 L1 D cache load references"
+ },
+ {,
+ "EventCode": "0xc094",
+ "EventName": "PM_LD_REF_L1_LSU2",
+ "BriefDescription": "LS2 L1 D cache load references counted at finish, gated by reject",
+ "PublicDescription": "LS2 L1 D cache load references counted at finish, gated by reject42"
+ },
+ {,
+ "EventCode": "0xc096",
+ "EventName": "PM_LD_REF_L1_LSU3",
+ "BriefDescription": "LS3 L1 D cache load references counted at finish, gated by reject",
+ "PublicDescription": "LS3 L1 D cache load references counted at finish, gated by reject42"
+ },
+ {,
+ "EventCode": "0x509a",
+ "EventName": "PM_LINK_STACK_INVALID_PTR",
+ "BriefDescription": "A flush were LS ptr is invalid, results in a pop , A lot of interrupts between push and pops",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x5098",
+ "EventName": "PM_LINK_STACK_WRONG_ADD_PRED",
+ "BriefDescription": "Link stack predicts wrong address, because of link stack design limitation",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xe080",
+ "EventName": "PM_LS0_ERAT_MISS_PREF",
+ "BriefDescription": "LS0 Erat miss due to prefetch",
+ "PublicDescription": "LS0 Erat miss due to prefetch42"
+ },
+ {,
+ "EventCode": "0xd0b8",
+ "EventName": "PM_LS0_L1_PREF",
+ "BriefDescription": "LS0 L1 cache data prefetches",
+ "PublicDescription": "LS0 L1 cache data prefetches42"
+ },
+ {,
+ "EventCode": "0xc098",
+ "EventName": "PM_LS0_L1_SW_PREF",
+ "BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches",
+ "PublicDescription": "Software L1 Prefetches, including SW Transient Prefetches42"
+ },
+ {,
+ "EventCode": "0xe082",
+ "EventName": "PM_LS1_ERAT_MISS_PREF",
+ "BriefDescription": "LS1 Erat miss due to prefetch",
+ "PublicDescription": "LS1 Erat miss due to prefetch42"
+ },
+ {,
+ "EventCode": "0xd0ba",
+ "EventName": "PM_LS1_L1_PREF",
+ "BriefDescription": "LS1 L1 cache data prefetches",
+ "PublicDescription": "LS1 L1 cache data prefetches42"
+ },
+ {,
+ "EventCode": "0xc09a",
+ "EventName": "PM_LS1_L1_SW_PREF",
+ "BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches",
+ "PublicDescription": "Software L1 Prefetches, including SW Transient Prefetches42"
+ },
+ {,
+ "EventCode": "0xc0b0",
+ "EventName": "PM_LSU0_FLUSH_LRQ",
+ "BriefDescription": "LS0 Flush: LRQ",
+ "PublicDescription": "LS0 Flush: LRQLSU0 LRQ flushes"
+ },
+ {,
+ "EventCode": "0xc0b8",
+ "EventName": "PM_LSU0_FLUSH_SRQ",
+ "BriefDescription": "LS0 Flush: SRQ",
+ "PublicDescription": "LS0 Flush: SRQLSU0 SRQ lhs flushes"
+ },
+ {,
+ "EventCode": "0xc0a4",
+ "EventName": "PM_LSU0_FLUSH_ULD",
+ "BriefDescription": "LS0 Flush: Unaligned Load",
+ "PublicDescription": "LS0 Flush: Unaligned LoadLSU0 unaligned load flushes"
+ },
+ {,
+ "EventCode": "0xc0ac",
+ "EventName": "PM_LSU0_FLUSH_UST",
+ "BriefDescription": "LS0 Flush: Unaligned Store",
+ "PublicDescription": "LS0 Flush: Unaligned StoreLSU0 unaligned store flushes"
+ },
+ {,
+ "EventCode": "0xf088",
+ "EventName": "PM_LSU0_L1_CAM_CANCEL",
+ "BriefDescription": "ls0 l1 tm cam cancel",
+ "PublicDescription": "ls0 l1 tm cam cancel42"
+ },
+ {,
+ "EventCode": "0x1e056",
+ "EventName": "PM_LSU0_LARX_FIN",
+ "BriefDescription": "Larx finished in LSU pipe0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xd08c",
+ "EventName": "PM_LSU0_LMQ_LHR_MERGE",
+ "BriefDescription": "LS0 Load Merged with another cacheline request",
+ "PublicDescription": "LS0 Load Merged with another cacheline request42"
+ },
+ {,
+ "EventCode": "0xc08c",
+ "EventName": "PM_LSU0_NCLD",
+ "BriefDescription": "LS0 Non-cachable Loads counted at finish",
+ "PublicDescription": "LS0 Non-cachable Loads counted at finishLSU0 non-cacheable loads"
+ },
+ {,
+ "EventCode": "0xe090",
+ "EventName": "PM_LSU0_PRIMARY_ERAT_HIT",
+ "BriefDescription": "Primary ERAT hit",
+ "PublicDescription": "Primary ERAT hit42"
+ },
+ {,
+ "EventCode": "0x1e05a",
+ "EventName": "PM_LSU0_REJECT",
+ "BriefDescription": "LSU0 reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xc09c",
+ "EventName": "PM_LSU0_SRQ_STFWD",
+ "BriefDescription": "LS0 SRQ forwarded data to a load",
+ "PublicDescription": "LS0 SRQ forwarded data to a loadLSU0 SRQ store forwarded"
+ },
+ {,
+ "EventCode": "0xf084",
+ "EventName": "PM_LSU0_STORE_REJECT",
+ "BriefDescription": "ls0 store reject",
+ "PublicDescription": "ls0 store reject42"
+ },
+ {,
+ "EventCode": "0xe0a8",
+ "EventName": "PM_LSU0_TMA_REQ_L2",
+ "BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding",
+ "PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42"
+ },
+ {,
+ "EventCode": "0xe098",
+ "EventName": "PM_LSU0_TM_L1_HIT",
+ "BriefDescription": "Load tm hit in L1",
+ "PublicDescription": "Load tm hit in L142"
+ },
+ {,
+ "EventCode": "0xe0a0",
+ "EventName": "PM_LSU0_TM_L1_MISS",
+ "BriefDescription": "Load tm L1 miss",
+ "PublicDescription": "Load tm L1 miss42"
+ },
+ {,
+ "EventCode": "0xc0b2",
+ "EventName": "PM_LSU1_FLUSH_LRQ",
+ "BriefDescription": "LS1 Flush: LRQ",
+ "PublicDescription": "LS1 Flush: LRQLSU1 LRQ flushes"
+ },
+ {,
+ "EventCode": "0xc0ba",
+ "EventName": "PM_LSU1_FLUSH_SRQ",
+ "BriefDescription": "LS1 Flush: SRQ",
+ "PublicDescription": "LS1 Flush: SRQLSU1 SRQ lhs flushes"
+ },
+ {,
+ "EventCode": "0xc0a6",
+ "EventName": "PM_LSU1_FLUSH_ULD",
+ "BriefDescription": "LS 1 Flush: Unaligned Load",
+ "PublicDescription": "LS 1 Flush: Unaligned LoadLSU1 unaligned load flushes"
+ },
+ {,
+ "EventCode": "0xc0ae",
+ "EventName": "PM_LSU1_FLUSH_UST",
+ "BriefDescription": "LS1 Flush: Unaligned Store",
+ "PublicDescription": "LS1 Flush: Unaligned StoreLSU1 unaligned store flushes"
+ },
+ {,
+ "EventCode": "0xf08a",
+ "EventName": "PM_LSU1_L1_CAM_CANCEL",
+ "BriefDescription": "ls1 l1 tm cam cancel",
+ "PublicDescription": "ls1 l1 tm cam cancel42"
+ },
+ {,
+ "EventCode": "0x2e056",
+ "EventName": "PM_LSU1_LARX_FIN",
+ "BriefDescription": "Larx finished in LSU pipe1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xd08e",
+ "EventName": "PM_LSU1_LMQ_LHR_MERGE",
+ "BriefDescription": "LS1 Load Merge with another cacheline request",
+ "PublicDescription": "LS1 Load Merge with another cacheline request42"
+ },
+ {,
+ "EventCode": "0xc08e",
+ "EventName": "PM_LSU1_NCLD",
+ "BriefDescription": "LS1 Non-cachable Loads counted at finish",
+ "PublicDescription": "LS1 Non-cachable Loads counted at finishLSU1 non-cacheable loads"
+ },
+ {,
+ "EventCode": "0xe092",
+ "EventName": "PM_LSU1_PRIMARY_ERAT_HIT",
+ "BriefDescription": "Primary ERAT hit",
+ "PublicDescription": "Primary ERAT hit42"
+ },
+ {,
+ "EventCode": "0x2e05a",
+ "EventName": "PM_LSU1_REJECT",
+ "BriefDescription": "LSU1 reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xc09e",
+ "EventName": "PM_LSU1_SRQ_STFWD",
+ "BriefDescription": "LS1 SRQ forwarded data to a load",
+ "PublicDescription": "LS1 SRQ forwarded data to a loadLSU1 SRQ store forwarded"
+ },
+ {,
+ "EventCode": "0xf086",
+ "EventName": "PM_LSU1_STORE_REJECT",
+ "BriefDescription": "ls1 store reject",
+ "PublicDescription": "ls1 store reject42"
+ },
+ {,
+ "EventCode": "0xe0aa",
+ "EventName": "PM_LSU1_TMA_REQ_L2",
+ "BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding",
+ "PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42"
+ },
+ {,
+ "EventCode": "0xe09a",
+ "EventName": "PM_LSU1_TM_L1_HIT",
+ "BriefDescription": "Load tm hit in L1",
+ "PublicDescription": "Load tm hit in L142"
+ },
+ {,
+ "EventCode": "0xe0a2",
+ "EventName": "PM_LSU1_TM_L1_MISS",
+ "BriefDescription": "Load tm L1 miss",
+ "PublicDescription": "Load tm L1 miss42"
+ },
+ {,
+ "EventCode": "0xc0b4",
+ "EventName": "PM_LSU2_FLUSH_LRQ",
+ "BriefDescription": "LS02Flush: LRQ",
+ "PublicDescription": "LS02Flush: LRQ42"
+ },
+ {,
+ "EventCode": "0xc0bc",
+ "EventName": "PM_LSU2_FLUSH_SRQ",
+ "BriefDescription": "LS2 Flush: SRQ",
+ "PublicDescription": "LS2 Flush: SRQ42"
+ },
+ {,
+ "EventCode": "0xc0a8",
+ "EventName": "PM_LSU2_FLUSH_ULD",
+ "BriefDescription": "LS3 Flush: Unaligned Load",
+ "PublicDescription": "LS3 Flush: Unaligned Load42"
+ },
+ {,
+ "EventCode": "0xf08c",
+ "EventName": "PM_LSU2_L1_CAM_CANCEL",
+ "BriefDescription": "ls2 l1 tm cam cancel",
+ "PublicDescription": "ls2 l1 tm cam cancel42"
+ },
+ {,
+ "EventCode": "0x3e056",
+ "EventName": "PM_LSU2_LARX_FIN",
+ "BriefDescription": "Larx finished in LSU pipe2",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xc084",
+ "EventName": "PM_LSU2_LDF",
+ "BriefDescription": "LS2 Scalar Loads",
+ "PublicDescription": "LS2 Scalar Loads42"
+ },
+ {,
+ "EventCode": "0xc088",
+ "EventName": "PM_LSU2_LDX",
+ "BriefDescription": "LS0 Vector Loads",
+ "PublicDescription": "LS0 Vector Loads42"
+ },
+ {,
+ "EventCode": "0xd090",
+ "EventName": "PM_LSU2_LMQ_LHR_MERGE",
+ "BriefDescription": "LS0 Load Merged with another cacheline request",
+ "PublicDescription": "LS0 Load Merged with another cacheline request42"
+ },
+ {,
+ "EventCode": "0xe094",
+ "EventName": "PM_LSU2_PRIMARY_ERAT_HIT",
+ "BriefDescription": "Primary ERAT hit",
+ "PublicDescription": "Primary ERAT hit42"
+ },
+ {,
+ "EventCode": "0x3e05a",
+ "EventName": "PM_LSU2_REJECT",
+ "BriefDescription": "LSU2 reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xc0a0",
+ "EventName": "PM_LSU2_SRQ_STFWD",
+ "BriefDescription": "LS2 SRQ forwarded data to a load",
+ "PublicDescription": "LS2 SRQ forwarded data to a load42"
+ },
+ {,
+ "EventCode": "0xe0ac",
+ "EventName": "PM_LSU2_TMA_REQ_L2",
+ "BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding",
+ "PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42"
+ },
+ {,
+ "EventCode": "0xe09c",
+ "EventName": "PM_LSU2_TM_L1_HIT",
+ "BriefDescription": "Load tm hit in L1",
+ "PublicDescription": "Load tm hit in L142"
+ },
+ {,
+ "EventCode": "0xe0a4",
+ "EventName": "PM_LSU2_TM_L1_MISS",
+ "BriefDescription": "Load tm L1 miss",
+ "PublicDescription": "Load tm L1 miss42"
+ },
+ {,
+ "EventCode": "0xc0b6",
+ "EventName": "PM_LSU3_FLUSH_LRQ",
+ "BriefDescription": "LS3 Flush: LRQ",
+ "PublicDescription": "LS3 Flush: LRQ42"
+ },
+ {,
+ "EventCode": "0xc0be",
+ "EventName": "PM_LSU3_FLUSH_SRQ",
+ "BriefDescription": "LS13 Flush: SRQ",
+ "PublicDescription": "LS13 Flush: SRQ42"
+ },
+ {,
+ "EventCode": "0xc0aa",
+ "EventName": "PM_LSU3_FLUSH_ULD",
+ "BriefDescription": "LS 14Flush: Unaligned Load",
+ "PublicDescription": "LS 14Flush: Unaligned Load42"
+ },
+ {,
+ "EventCode": "0xf08e",
+ "EventName": "PM_LSU3_L1_CAM_CANCEL",
+ "BriefDescription": "ls3 l1 tm cam cancel",
+ "PublicDescription": "ls3 l1 tm cam cancel42"
+ },
+ {,
+ "EventCode": "0x4e056",
+ "EventName": "PM_LSU3_LARX_FIN",
+ "BriefDescription": "Larx finished in LSU pipe3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xc086",
+ "EventName": "PM_LSU3_LDF",
+ "BriefDescription": "LS3 Scalar Loads",
+ "PublicDescription": "LS3 Scalar Loads 42"
+ },
+ {,
+ "EventCode": "0xc08a",
+ "EventName": "PM_LSU3_LDX",
+ "BriefDescription": "LS1 Vector Loads",
+ "PublicDescription": "LS1 Vector Loads42"
+ },
+ {,
+ "EventCode": "0xd092",
+ "EventName": "PM_LSU3_LMQ_LHR_MERGE",
+ "BriefDescription": "LS1 Load Merge with another cacheline request",
+ "PublicDescription": "LS1 Load Merge with another cacheline request42"
+ },
+ {,
+ "EventCode": "0xe096",
+ "EventName": "PM_LSU3_PRIMARY_ERAT_HIT",
+ "BriefDescription": "Primary ERAT hit",
+ "PublicDescription": "Primary ERAT hit42"
+ },
+ {,
+ "EventCode": "0x4e05a",
+ "EventName": "PM_LSU3_REJECT",
+ "BriefDescription": "LSU3 reject",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xc0a2",
+ "EventName": "PM_LSU3_SRQ_STFWD",
+ "BriefDescription": "LS3 SRQ forwarded data to a load",
+ "PublicDescription": "LS3 SRQ forwarded data to a load42"
+ },
+ {,
+ "EventCode": "0xe0ae",
+ "EventName": "PM_LSU3_TMA_REQ_L2",
+ "BriefDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding",
+ "PublicDescription": "addrs only req to L2 only on the first one,Indication that Load footprint is not expanding42"
+ },
+ {,
+ "EventCode": "0xe09e",
+ "EventName": "PM_LSU3_TM_L1_HIT",
+ "BriefDescription": "Load tm hit in L1",
+ "PublicDescription": "Load tm hit in L142"
+ },
+ {,
+ "EventCode": "0xe0a6",
+ "EventName": "PM_LSU3_TM_L1_MISS",
+ "BriefDescription": "Load tm L1 miss",
+ "PublicDescription": "Load tm L1 miss42"
+ },
+ {,
+ "EventCode": "0xe880",
+ "EventName": "PM_LSU_ERAT_MISS_PREF",
+ "BriefDescription": "Erat miss due to prefetch, on either pipe",
+ "PublicDescription": "LSU"
+ },
+ {,
+ "EventCode": "0xc8ac",
+ "EventName": "PM_LSU_FLUSH_UST",
+ "BriefDescription": "Unaligned Store Flush on either pipe",
+ "PublicDescription": "LSU"
+ },
+ {,
+ "EventCode": "0xd0a4",
+ "EventName": "PM_LSU_FOUR_TABLEWALK_CYC",
+ "BriefDescription": "Cycles when four tablewalks pending on this thread",
+ "PublicDescription": "Cycles when four tablewalks pending on this thread42"
+ },
+ {,
+ "EventCode": "0x10066",
+ "EventName": "PM_LSU_FX_FIN",
+ "BriefDescription": "LSU Finished a FX operation (up to 2 per cycle",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xd8b8",
+ "EventName": "PM_LSU_L1_PREF",
+ "BriefDescription": "hw initiated , include sw streaming forms as well , include sw streams as a separate event",
+ "PublicDescription": "LSU"
+ },
+ {,
+ "EventCode": "0xc898",
+ "EventName": "PM_LSU_L1_SW_PREF",
+ "BriefDescription": "Software L1 Prefetches, including SW Transient Prefetches, on both pipes",
+ "PublicDescription": "LSU"
+ },
+ {,
+ "EventCode": "0xc884",
+ "EventName": "PM_LSU_LDF",
+ "BriefDescription": "FPU loads only on LS2/LS3 ie LU0/LU1",
+ "PublicDescription": "LSU"
+ },
+ {,
+ "EventCode": "0xc888",
+ "EventName": "PM_LSU_LDX",
+ "BriefDescription": "Vector loads can issue only on LS2/LS3",
+ "PublicDescription": "LSU"
+ },
+ {,
+ "EventCode": "0xd0a2",
+ "EventName": "PM_LSU_LMQ_FULL_CYC",
+ "BriefDescription": "LMQ full",
+ "PublicDescription": "LMQ fullCycles LMQ full"
+ },
+ {,
+ "EventCode": "0xd0a1",
+ "EventName": "PM_LSU_LMQ_S0_ALLOC",
+ "BriefDescription": "Per thread - use edge detect to count allocates On a per thread basis, level signal indicating Slot 0 is valid. By instrumenting a single slot we can calculate service time for that slot. Previous machines required a separate signal indicating the slot was allocated. Because any signal can be routed to any counter in P8, we can count level in one PMC and edge detect in another PMC using the same signal",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0xd0a0",
+ "EventName": "PM_LSU_LMQ_S0_VALID",
+ "BriefDescription": "Slot 0 of LMQ valid",
+ "PublicDescription": "Slot 0 of LMQ validLMQ slot 0 valid"
+ },
+ {,
+ "EventCode": "0x3001c",
+ "EventName": "PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC",
+ "BriefDescription": "ALL threads lsu empty (lmq and srq empty)",
+ "PublicDescription": "ALL threads lsu empty (lmq and srq empty). Issue HW016541"
+ },
+ {,
+ "EventCode": "0xd09f",
+ "EventName": "PM_LSU_LRQ_S0_ALLOC",
+ "BriefDescription": "Per thread - use edge detect to count allocates On a per thread basis, level signal indicating Slot 0 is valid. By instrumenting a single slot we can calculate service time for that slot. Previous machines required a separate signal indicating the slot was allocated. Because any signal can be routed to any counter in P8, we can count level in one PMC and edge detect in another PMC using the same signal",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0xd09e",
+ "EventName": "PM_LSU_LRQ_S0_VALID",
+ "BriefDescription": "Slot 0 of LRQ valid",
+ "PublicDescription": "Slot 0 of LRQ validLRQ slot 0 valid"
+ },
+ {,
+ "EventCode": "0xf091",
+ "EventName": "PM_LSU_LRQ_S43_ALLOC",
+ "BriefDescription": "LRQ slot 43 was released",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0xf090",
+ "EventName": "PM_LSU_LRQ_S43_VALID",
+ "BriefDescription": "LRQ slot 43 was busy",
+ "PublicDescription": "LRQ slot 43 was busy42"
+ },
+ {,
+ "EventCode": "0x30162",
+ "EventName": "PM_LSU_MRK_DERAT_MISS",
+ "BriefDescription": "DERAT Reloaded (Miss)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xc88c",
+ "EventName": "PM_LSU_NCLD",
+ "BriefDescription": "count at finish so can return only on ls0 or ls1",
+ "PublicDescription": "LSU"
+ },
+ {,
+ "EventCode": "0xc092",
+ "EventName": "PM_LSU_NCST",
+ "BriefDescription": "Non-cachable Stores sent to nest",
+ "PublicDescription": "Non-cachable Stores sent to nest42"
+ },
+ {,
+ "EventCode": "0x10064",
+ "EventName": "PM_LSU_REJECT",
+ "BriefDescription": "LSU Reject (up to 4 per cycle)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xd082",
+ "EventName": "PM_LSU_SET_MPRED",
+ "BriefDescription": "Line already in cache at reload time",
+ "PublicDescription": "Line already in cache at reload time42"
+ },
+ {,
+ "EventCode": "0x40008",
+ "EventName": "PM_LSU_SRQ_EMPTY_CYC",
+ "BriefDescription": "ALL threads srq empty",
+ "PublicDescription": "All threads srq empty"
+ },
+ {,
+ "EventCode": "0xd09d",
+ "EventName": "PM_LSU_SRQ_S0_ALLOC",
+ "BriefDescription": "Per thread - use edge detect to count allocates On a per thread basis, level signal indicating Slot 0 is valid. By instrumenting a single slot we can calculate service time for that slot. Previous machines required a separate signal indicating the slot was allocated. Because any signal can be routed to any counter in P8, we can count level in one PMC and edge detect in another PMC using the same signal",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0xd09c",
+ "EventName": "PM_LSU_SRQ_S0_VALID",
+ "BriefDescription": "Slot 0 of SRQ valid",
+ "PublicDescription": "Slot 0 of SRQ validSRQ slot 0 valid"
+ },
+ {,
+ "EventCode": "0xf093",
+ "EventName": "PM_LSU_SRQ_S39_ALLOC",
+ "BriefDescription": "SRQ slot 39 was released",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0xf092",
+ "EventName": "PM_LSU_SRQ_S39_VALID",
+ "BriefDescription": "SRQ slot 39 was busy",
+ "PublicDescription": "SRQ slot 39 was busy42"
+ },
+ {,
+ "EventCode": "0xd09b",
+ "EventName": "PM_LSU_SRQ_SYNC",
+ "BriefDescription": "A sync in the SRQ ended",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0xd09a",
+ "EventName": "PM_LSU_SRQ_SYNC_CYC",
+ "BriefDescription": "A sync is in the SRQ (edge detect to count)",
+ "PublicDescription": "A sync is in the SRQ (edge detect to count)SRQ sync duration"
+ },
+ {,
+ "EventCode": "0xf084",
+ "EventName": "PM_LSU_STORE_REJECT",
+ "BriefDescription": "Store reject on either pipe",
+ "PublicDescription": "LSU"
+ },
+ {,
+ "EventCode": "0xd0a6",
+ "EventName": "PM_LSU_TWO_TABLEWALK_CYC",
+ "BriefDescription": "Cycles when two tablewalks pending on this thread",
+ "PublicDescription": "Cycles when two tablewalks pending on this thread42"
+ },
+ {,
+ "EventCode": "0x5094",
+ "EventName": "PM_LWSYNC",
+ "BriefDescription": "threaded version, IC Misses where we got EA dir hit but no sector valids were on. ICBI took line out",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x209a",
+ "EventName": "PM_LWSYNC_HELD",
+ "BriefDescription": "LWSYNC held at dispatch",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3013a",
+ "EventName": "PM_MRK_CRU_FIN",
+ "BriefDescription": "IFU non-branch finished",
+ "PublicDescription": "IFU non-branch marked instruction finished"
+ },
+ {,
+ "EventCode": "0x4d146",
+ "EventName": "PM_MRK_DATA_FROM_L21_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L2 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d126",
+ "EventName": "PM_MRK_DATA_FROM_L21_MOD_CYC",
+ "BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's L2 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d146",
+ "EventName": "PM_MRK_DATA_FROM_L21_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L2 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c126",
+ "EventName": "PM_MRK_DATA_FROM_L21_SHR_CYC",
+ "BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L2 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d144",
+ "EventName": "PM_MRK_DATA_FROM_L31_ECO_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's ECO L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d124",
+ "EventName": "PM_MRK_DATA_FROM_L31_ECO_MOD_CYC",
+ "BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's ECO L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d144",
+ "EventName": "PM_MRK_DATA_FROM_L31_ECO_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's ECO L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c124",
+ "EventName": "PM_MRK_DATA_FROM_L31_ECO_SHR_CYC",
+ "BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's ECO L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d144",
+ "EventName": "PM_MRK_DATA_FROM_L31_MOD",
+ "BriefDescription": "The processor's data cache was reloaded with Modified (M) data from another core's L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d124",
+ "EventName": "PM_MRK_DATA_FROM_L31_MOD_CYC",
+ "BriefDescription": "Duration in cycles to reload with Modified (M) data from another core's L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d146",
+ "EventName": "PM_MRK_DATA_FROM_L31_SHR",
+ "BriefDescription": "The processor's data cache was reloaded with Shared (S) data from another core's L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c126",
+ "EventName": "PM_MRK_DATA_FROM_L31_SHR_CYC",
+ "BriefDescription": "Duration in cycles to reload with Shared (S) data from another core's L3 on the same chip due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x201e0",
+ "EventName": "PM_MRK_DATA_FROM_MEM",
+ "BriefDescription": "The processor's data cache was reloaded from a memory location including L4 from local remote or distant due to a marked load",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f146",
+ "EventName": "PM_MRK_DPTEG_FROM_L21_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L2 on the same chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3f146",
+ "EventName": "PM_MRK_DPTEG_FROM_L21_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L2 on the same chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3f140",
+ "EventName": "PM_MRK_DPTEG_FROM_L2_DISP_CONFLICT_LDHITST",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with load hit store conflict due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f140",
+ "EventName": "PM_MRK_DPTEG_FROM_L2_DISP_CONFLICT_OTHER",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 with dispatch conflict due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f144",
+ "EventName": "PM_MRK_DPTEG_FROM_L31_ECO_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's ECO L3 on the same chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3f144",
+ "EventName": "PM_MRK_DPTEG_FROM_L31_ECO_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's ECO L3 on the same chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2f144",
+ "EventName": "PM_MRK_DPTEG_FROM_L31_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another core's L3 on the same chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1f146",
+ "EventName": "PM_MRK_DPTEG_FROM_L31_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another core's L3 on the same chip due to a marked data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30156",
+ "EventName": "PM_MRK_FAB_RSP_MATCH",
+ "BriefDescription": "ttype and cresp matched as specified in MMCR1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4f152",
+ "EventName": "PM_MRK_FAB_RSP_MATCH_CYC",
+ "BriefDescription": "cresp/ttype match cycles",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2013c",
+ "EventName": "PM_MRK_FILT_MATCH",
+ "BriefDescription": "Marked filter Match",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1013c",
+ "EventName": "PM_MRK_FIN_STALL_CYC",
+ "BriefDescription": "Marked instruction Finish Stall cycles (marked finish after NTC) (use edge detect to count )",
+ "PublicDescription": "Marked instruction Finish Stall cycles (marked finish after NTC) (use edge detect to count #)"
+ },
+ {,
+ "EventCode": "0x40130",
+ "EventName": "PM_MRK_GRP_CMPL",
+ "BriefDescription": "marked instruction finished (completed)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4013a",
+ "EventName": "PM_MRK_GRP_IC_MISS",
+ "BriefDescription": "Marked Group experienced I cache miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3013c",
+ "EventName": "PM_MRK_GRP_NTC",
+ "BriefDescription": "Marked group ntc cycles",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1013f",
+ "EventName": "PM_MRK_LD_MISS_EXPOSED",
+ "BriefDescription": "Marked Load exposed Miss (exposed period ended)",
+ "PublicDescription": "Marked Load exposed Miss (use edge detect to count #)"
+ },
+ {,
+ "EventCode": "0xd180",
+ "EventName": "PM_MRK_LSU_FLUSH",
+ "BriefDescription": "Flush: (marked) : All Cases",
+ "PublicDescription": "Flush: (marked) : All Cases42"
+ },
+ {,
+ "EventCode": "0xd188",
+ "EventName": "PM_MRK_LSU_FLUSH_LRQ",
+ "BriefDescription": "Flush: (marked) LRQ",
+ "PublicDescription": "Flush: (marked) LRQMarked LRQ flushes"
+ },
+ {,
+ "EventCode": "0xd18a",
+ "EventName": "PM_MRK_LSU_FLUSH_SRQ",
+ "BriefDescription": "Flush: (marked) SRQ",
+ "PublicDescription": "Flush: (marked) SRQMarked SRQ lhs flushes"
+ },
+ {,
+ "EventCode": "0xd184",
+ "EventName": "PM_MRK_LSU_FLUSH_ULD",
+ "BriefDescription": "Flush: (marked) Unaligned Load",
+ "PublicDescription": "Flush: (marked) Unaligned LoadMarked unaligned load flushes"
+ },
+ {,
+ "EventCode": "0xd186",
+ "EventName": "PM_MRK_LSU_FLUSH_UST",
+ "BriefDescription": "Flush: (marked) Unaligned Store",
+ "PublicDescription": "Flush: (marked) Unaligned StoreMarked unaligned store flushes"
+ },
+ {,
+ "EventCode": "0x40164",
+ "EventName": "PM_MRK_LSU_REJECT",
+ "BriefDescription": "LSU marked reject (up to 2 per cycle)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30164",
+ "EventName": "PM_MRK_LSU_REJECT_ERAT_MISS",
+ "BriefDescription": "LSU marked reject due to ERAT (up to 2 per cycle)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d15a",
+ "EventName": "PM_MRK_SRC_PREF_TRACK_EFF",
+ "BriefDescription": "Marked src pref track was effective",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d15a",
+ "EventName": "PM_MRK_SRC_PREF_TRACK_INEFF",
+ "BriefDescription": "Prefetch tracked was ineffective for marked src",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d15c",
+ "EventName": "PM_MRK_SRC_PREF_TRACK_MOD",
+ "BriefDescription": "Prefetch tracked was moderate for marked src",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1d15c",
+ "EventName": "PM_MRK_SRC_PREF_TRACK_MOD_L2",
+ "BriefDescription": "Marked src Prefetch Tracked was moderate (source L2)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3d15c",
+ "EventName": "PM_MRK_SRC_PREF_TRACK_MOD_L3",
+ "BriefDescription": "Prefetch tracked was moderate (L3 hit) for marked src",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1c15a",
+ "EventName": "PM_MRK_TGT_PREF_TRACK_EFF",
+ "BriefDescription": "Marked target pref track was effective",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3c15a",
+ "EventName": "PM_MRK_TGT_PREF_TRACK_INEFF",
+ "BriefDescription": "Prefetch tracked was ineffective for marked target",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c15c",
+ "EventName": "PM_MRK_TGT_PREF_TRACK_MOD",
+ "BriefDescription": "Prefetch tracked was moderate for marked target",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1c15c",
+ "EventName": "PM_MRK_TGT_PREF_TRACK_MOD_L2",
+ "BriefDescription": "Marked target Prefetch Tracked was moderate (source L2)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3c15c",
+ "EventName": "PM_MRK_TGT_PREF_TRACK_MOD_L3",
+ "BriefDescription": "Prefetch tracked was moderate (L3 hit) for marked target",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20b0",
+ "EventName": "PM_NESTED_TEND",
+ "BriefDescription": "Completion time nested tend",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20b6",
+ "EventName": "PM_NON_FAV_TBEGIN",
+ "BriefDescription": "Dispatch time non favored tbegin",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x328084",
+ "EventName": "PM_NON_TM_RST_SC",
+ "BriefDescription": "non tm snp rst tm sc",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2001a",
+ "EventName": "PM_NTCG_ALL_FIN",
+ "BriefDescription": "Cycles after all instructions have finished to group completed",
+ "PublicDescription": "Ccycles after all instructions have finished to group completed"
+ },
+ {,
+ "EventCode": "0x20ac",
+ "EventName": "PM_OUTER_TBEGIN",
+ "BriefDescription": "Completion time outer tbegin",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20ae",
+ "EventName": "PM_OUTER_TEND",
+ "BriefDescription": "Completion time outer tend",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2005a",
+ "EventName": "PM_PREF_TRACKED",
+ "BriefDescription": "Total number of Prefetch Operations that were tracked",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1005a",
+ "EventName": "PM_PREF_TRACK_EFF",
+ "BriefDescription": "Prefetch Tracked was effective",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3005a",
+ "EventName": "PM_PREF_TRACK_INEFF",
+ "BriefDescription": "Prefetch tracked was ineffective",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4005a",
+ "EventName": "PM_PREF_TRACK_MOD",
+ "BriefDescription": "Prefetch tracked was moderate",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1005c",
+ "EventName": "PM_PREF_TRACK_MOD_L2",
+ "BriefDescription": "Prefetch Tracked was moderate (source L2)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3005c",
+ "EventName": "PM_PREF_TRACK_MOD_L3",
+ "BriefDescription": "Prefetch tracked was moderate (L3)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xe084",
+ "EventName": "PM_PTE_PREFETCH",
+ "BriefDescription": "PTE prefetches",
+ "PublicDescription": "PTE prefetches42"
+ },
+ {,
+ "EventCode": "0x16081",
+ "EventName": "PM_RC0_ALLOC",
+ "BriefDescription": "RC mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0x16080",
+ "EventName": "PM_RC0_BUSY",
+ "BriefDescription": "RC mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x200301ea",
+ "EventName": "PM_RC_LIFETIME_EXC_1024",
+ "BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 1024 cycles",
+ "PublicDescription": "Reload latency exceeded 1024 cyc"
+ },
+ {,
+ "EventCode": "0x200401ec",
+ "EventName": "PM_RC_LIFETIME_EXC_2048",
+ "BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 2048 cycles",
+ "PublicDescription": "Threshold counter exceeded a value of 2048"
+ },
+ {,
+ "EventCode": "0x200101e8",
+ "EventName": "PM_RC_LIFETIME_EXC_256",
+ "BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 256 cycles",
+ "PublicDescription": "Threshold counter exceed a count of 256"
+ },
+ {,
+ "EventCode": "0x200201e6",
+ "EventName": "PM_RC_LIFETIME_EXC_32",
+ "BriefDescription": "Number of times the RC machine for a sampled instruction was active for more than 32 cycles",
+ "PublicDescription": "Reload latency exceeded 32 cyc"
+ },
+ {,
+ "EventCode": "0x36088",
+ "EventName": "PM_RC_USAGE",
+ "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 RC machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x34808e",
+ "EventName": "PM_RD_CLEARING_SC",
+ "BriefDescription": "rd clearing sc",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x34808c",
+ "EventName": "PM_RD_FORMING_SC",
+ "BriefDescription": "rd forming sc",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x428086",
+ "EventName": "PM_RD_HIT_PF",
+ "BriefDescription": "rd machine hit l3 pf machine",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20004",
+ "EventName": "PM_REAL_SRQ_FULL",
+ "BriefDescription": "Out of real srq entries",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2006a",
+ "EventName": "PM_RUN_CYC_SMT2_SHRD_MODE",
+ "BriefDescription": "cycles this threads run latch is set and the core is in SMT2 shared mode",
+ "PublicDescription": "Cycles run latch is set and core is in SMT2-shared mode"
+ },
+ {,
+ "EventCode": "0x1006a",
+ "EventName": "PM_RUN_CYC_SMT2_SPLIT_MODE",
+ "BriefDescription": "Cycles run latch is set and core is in SMT2-split mode",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4006c",
+ "EventName": "PM_RUN_CYC_SMT8_MODE",
+ "BriefDescription": "Cycles run latch is set and core is in SMT8 mode",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xf082",
+ "EventName": "PM_SEC_ERAT_HIT",
+ "BriefDescription": "secondary ERAT Hit",
+ "PublicDescription": "secondary ERAT Hit42"
+ },
+ {,
+ "EventCode": "0x508c",
+ "EventName": "PM_SHL_CREATED",
+ "BriefDescription": "Store-Hit-Load Table Entry Created",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x508e",
+ "EventName": "PM_SHL_ST_CONVERT",
+ "BriefDescription": "Store-Hit-Load Table Read Hit with entry Enabled",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x5090",
+ "EventName": "PM_SHL_ST_DISABLE",
+ "BriefDescription": "Store-Hit-Load Table Read Hit with entry Disabled (entry was disabled due to the entry shown to not prevent the flush)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x26085",
+ "EventName": "PM_SN0_ALLOC",
+ "BriefDescription": "SN mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
+ "PublicDescription": "0.0"
+ },
+ {,
+ "EventCode": "0x26084",
+ "EventName": "PM_SN0_BUSY",
+ "BriefDescription": "SN mach 0 Busy. Used by PMU to sample ave RC livetime(mach0 used as sample point)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xd0b2",
+ "EventName": "PM_SNOOP_TLBIE",
+ "BriefDescription": "TLBIE snoop",
+ "PublicDescription": "TLBIE snoopSnoop TLBIE"
+ },
+ {,
+ "EventCode": "0x338088",
+ "EventName": "PM_SNP_TM_HIT_M",
+ "BriefDescription": "snp tm st hit m mu",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x33808a",
+ "EventName": "PM_SNP_TM_HIT_T",
+ "BriefDescription": "snp tm_st_hit t tn te",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4608c",
+ "EventName": "PM_SN_USAGE",
+ "BriefDescription": "Continuous 16 cycle(2to1) window where this signals rotates thru sampling each L2 SN machine busy. PMU uses this wave to then do 16 cyc count to sample total number of machs running",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10028",
+ "EventName": "PM_STALL_END_GCT_EMPTY",
+ "BriefDescription": "Count ended because GCT went empty",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xc090",
+ "EventName": "PM_STCX_LSU",
+ "BriefDescription": "STCX executed reported at sent to nest",
+ "PublicDescription": "STCX executed reported at sent to nest42"
+ },
+ {,
+ "EventCode": "0x717080",
+ "EventName": "PM_ST_CAUSED_FAIL",
+ "BriefDescription": "Non TM St caused any thread to fail",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3090",
+ "EventName": "PM_SWAP_CANCEL",
+ "BriefDescription": "SWAP cancel , rtag not available",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3092",
+ "EventName": "PM_SWAP_CANCEL_GPR",
+ "BriefDescription": "SWAP cancel , rtag not available for gpr",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x308c",
+ "EventName": "PM_SWAP_COMPLETE",
+ "BriefDescription": "swap cast in completed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x308e",
+ "EventName": "PM_SWAP_COMPLETE_GPR",
+ "BriefDescription": "swap cast in completed fpr gpr",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xe086",
+ "EventName": "PM_TABLEWALK_CYC_PREF",
+ "BriefDescription": "tablewalk qualified for pte prefetches",
+ "PublicDescription": "tablewalk qualified for pte prefetches42"
+ },
+ {,
+ "EventCode": "0x20b2",
+ "EventName": "PM_TABORT_TRECLAIM",
+ "BriefDescription": "Completion time tabortnoncd, tabortcd, treclaim",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xe0ba",
+ "EventName": "PM_TEND_PEND_CYC",
+ "BriefDescription": "TEND latency per thread",
+ "PublicDescription": "TEND latency per thread42"
+ },
+ {,
+ "EventCode": "0x10012",
+ "EventName": "PM_THRD_GRP_CMPL_BOTH_CYC",
+ "BriefDescription": "Cycles group completed on both completion slots by any thread",
+ "PublicDescription": "Two threads finished same cycle (gated by run latch)"
+ },
+ {,
+ "EventCode": "0x40bc",
+ "EventName": "PM_THRD_PRIO_0_1_CYC",
+ "BriefDescription": "Cycles thread running at priority level 0 or 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x40be",
+ "EventName": "PM_THRD_PRIO_2_3_CYC",
+ "BriefDescription": "Cycles thread running at priority level 2 or 3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x5080",
+ "EventName": "PM_THRD_PRIO_4_5_CYC",
+ "BriefDescription": "Cycles thread running at priority level 4 or 5",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x5082",
+ "EventName": "PM_THRD_PRIO_6_7_CYC",
+ "BriefDescription": "Cycles thread running at priority level 6 or 7",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3098",
+ "EventName": "PM_THRD_REBAL_CYC",
+ "BriefDescription": "cycles rebalance was active",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20b8",
+ "EventName": "PM_TM_BEGIN_ALL",
+ "BriefDescription": "Tm any tbegin",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x318082",
+ "EventName": "PM_TM_CAM_OVERFLOW",
+ "BriefDescription": "l3 tm cam overflow during L2 co of SC",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x74708c",
+ "EventName": "PM_TM_CAP_OVERFLOW",
+ "BriefDescription": "TM Footprint Capactiy Overflow",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20ba",
+ "EventName": "PM_TM_END_ALL",
+ "BriefDescription": "Tm any tend",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3086",
+ "EventName": "PM_TM_FAIL_CONF_NON_TM",
+ "BriefDescription": "TEXAS fail reason @ completion",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3088",
+ "EventName": "PM_TM_FAIL_CON_TM",
+ "BriefDescription": "TEXAS fail reason @ completion",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xe0b2",
+ "EventName": "PM_TM_FAIL_DISALLOW",
+ "BriefDescription": "TM fail disallow",
+ "PublicDescription": "TM fail disallow42"
+ },
+ {,
+ "EventCode": "0x3084",
+ "EventName": "PM_TM_FAIL_FOOTPRINT_OVERFLOW",
+ "BriefDescription": "TEXAS fail reason @ completion",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xe0b8",
+ "EventName": "PM_TM_FAIL_NON_TX_CONFLICT",
+ "BriefDescription": "Non transactional conflict from LSU whtver gets repoted to texas",
+ "PublicDescription": "Non transactional conflict from LSU whtver gets repoted to texas42"
+ },
+ {,
+ "EventCode": "0x308a",
+ "EventName": "PM_TM_FAIL_SELF",
+ "BriefDescription": "TEXAS fail reason @ completion",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xe0b4",
+ "EventName": "PM_TM_FAIL_TLBIE",
+ "BriefDescription": "TLBIE hit bloom filter",
+ "PublicDescription": "TLBIE hit bloom filter42"
+ },
+ {,
+ "EventCode": "0xe0b6",
+ "EventName": "PM_TM_FAIL_TX_CONFLICT",
+ "BriefDescription": "Transactional conflict from LSU, whatever gets reported to texas",
+ "PublicDescription": "Transactional conflict from LSU, whatever gets reported to texas 42"
+ },
+ {,
+ "EventCode": "0x727086",
+ "EventName": "PM_TM_FAV_CAUSED_FAIL",
+ "BriefDescription": "TM Load (fav) caused another thread to fail",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x717082",
+ "EventName": "PM_TM_LD_CAUSED_FAIL",
+ "BriefDescription": "Non TM Ld caused any thread to fail",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x727084",
+ "EventName": "PM_TM_LD_CONF",
+ "BriefDescription": "TM Load (fav or non-fav) ran into conflict (failed)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x328086",
+ "EventName": "PM_TM_RST_SC",
+ "BriefDescription": "tm snp rst tm sc",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x318080",
+ "EventName": "PM_TM_SC_CO",
+ "BriefDescription": "l3 castout tm Sc line",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x73708a",
+ "EventName": "PM_TM_ST_CAUSED_FAIL",
+ "BriefDescription": "TM Store (fav or non-fav) caused another thread to fail",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x737088",
+ "EventName": "PM_TM_ST_CONF",
+ "BriefDescription": "TM Store (fav or non-fav) ran into conflict (failed)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20bc",
+ "EventName": "PM_TM_TBEGIN",
+ "BriefDescription": "Tm nested tbegin",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3080",
+ "EventName": "PM_TM_TRESUME",
+ "BriefDescription": "Tm resume",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20be",
+ "EventName": "PM_TM_TSUSPEND",
+ "BriefDescription": "Tm suspend",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xe08c",
+ "EventName": "PM_UP_PREF_L3",
+ "BriefDescription": "Micropartition prefetch",
+ "PublicDescription": "Micropartition prefetch42"
+ },
+ {,
+ "EventCode": "0xe08e",
+ "EventName": "PM_UP_PREF_POINTER",
+ "BriefDescription": "Micrpartition pointer prefetches",
+ "PublicDescription": "Micrpartition pointer prefetches42"
+ },
+ {,
+ "EventCode": "0xa0a4",
+ "EventName": "PM_VSU0_16FLOP",
+ "BriefDescription": "Sixteen flops operation (SP vector versions of fdiv,fsqrt)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa080",
+ "EventName": "PM_VSU0_1FLOP",
+ "BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finished",
+ "PublicDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finishedDecode into 1,2,4 FLOP according to instr IOP, multiplied by #vector elements according to route( eg x1, x2, x4) Only if instr sends finish to ISU"
+ },
+ {,
+ "EventCode": "0xa098",
+ "EventName": "PM_VSU0_2FLOP",
+ "BriefDescription": "two flops operation (scalar fmadd, fnmadd, fmsub, fnmsub and DP vector versions of single flop instructions)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa09c",
+ "EventName": "PM_VSU0_4FLOP",
+ "BriefDescription": "four flops operation (scalar fdiv, fsqrt, DP vector version of fmadd, fnmadd, fmsub, fnmsub, SP vector versions of single flop instructions)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0a0",
+ "EventName": "PM_VSU0_8FLOP",
+ "BriefDescription": "eight flops operation (DP vector versions of fdiv,fsqrt and SP vector versions of fmadd,fnmadd,fmsub,fnmsub)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0a4",
+ "EventName": "PM_VSU0_COMPLEX_ISSUED",
+ "BriefDescription": "Complex VMX instruction issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0b4",
+ "EventName": "PM_VSU0_CY_ISSUED",
+ "BriefDescription": "Cryptographic instruction RFC02196 Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0a8",
+ "EventName": "PM_VSU0_DD_ISSUED",
+ "BriefDescription": "64BIT Decimal Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa08c",
+ "EventName": "PM_VSU0_DP_2FLOP",
+ "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa090",
+ "EventName": "PM_VSU0_DP_FMA",
+ "BriefDescription": "DP vector version of fmadd,fnmadd,fmsub,fnmsub",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa094",
+ "EventName": "PM_VSU0_DP_FSQRT_FDIV",
+ "BriefDescription": "DP vector versions of fdiv,fsqrt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0ac",
+ "EventName": "PM_VSU0_DQ_ISSUED",
+ "BriefDescription": "128BIT Decimal Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0b0",
+ "EventName": "PM_VSU0_EX_ISSUED",
+ "BriefDescription": "Direct move 32/64b VRFtoGPR RFC02206 Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0bc",
+ "EventName": "PM_VSU0_FIN",
+ "BriefDescription": "VSU0 Finished an instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa084",
+ "EventName": "PM_VSU0_FMA",
+ "BriefDescription": "two flops operation (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only!",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb098",
+ "EventName": "PM_VSU0_FPSCR",
+ "BriefDescription": "Move to/from FPSCR type instruction issued on Pipe 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa088",
+ "EventName": "PM_VSU0_FSQRT_FDIV",
+ "BriefDescription": "four flops operation (fdiv,fsqrt) Scalar Instructions only!",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb090",
+ "EventName": "PM_VSU0_PERMUTE_ISSUED",
+ "BriefDescription": "Permute VMX Instruction Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb088",
+ "EventName": "PM_VSU0_SCALAR_DP_ISSUED",
+ "BriefDescription": "Double Precision scalar instruction issued on Pipe0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb094",
+ "EventName": "PM_VSU0_SIMPLE_ISSUED",
+ "BriefDescription": "Simple VMX instruction issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0a8",
+ "EventName": "PM_VSU0_SINGLE",
+ "BriefDescription": "FPU single precision",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb09c",
+ "EventName": "PM_VSU0_SQ",
+ "BriefDescription": "Store Vector Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb08c",
+ "EventName": "PM_VSU0_STF",
+ "BriefDescription": "FPU store (SP or DP) issued on Pipe0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb080",
+ "EventName": "PM_VSU0_VECTOR_DP_ISSUED",
+ "BriefDescription": "Double Precision vector instruction issued on Pipe0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb084",
+ "EventName": "PM_VSU0_VECTOR_SP_ISSUED",
+ "BriefDescription": "Single Precision vector instruction issued (executed)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0a6",
+ "EventName": "PM_VSU1_16FLOP",
+ "BriefDescription": "Sixteen flops operation (SP vector versions of fdiv,fsqrt)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa082",
+ "EventName": "PM_VSU1_1FLOP",
+ "BriefDescription": "one flop (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg) operation finished",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa09a",
+ "EventName": "PM_VSU1_2FLOP",
+ "BriefDescription": "two flops operation (scalar fmadd, fnmadd, fmsub, fnmsub and DP vector versions of single flop instructions)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa09e",
+ "EventName": "PM_VSU1_4FLOP",
+ "BriefDescription": "four flops operation (scalar fdiv, fsqrt, DP vector version of fmadd, fnmadd, fmsub, fnmsub, SP vector versions of single flop instructions)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0a2",
+ "EventName": "PM_VSU1_8FLOP",
+ "BriefDescription": "eight flops operation (DP vector versions of fdiv,fsqrt and SP vector versions of fmadd,fnmadd,fmsub,fnmsub)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0a6",
+ "EventName": "PM_VSU1_COMPLEX_ISSUED",
+ "BriefDescription": "Complex VMX instruction issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0b6",
+ "EventName": "PM_VSU1_CY_ISSUED",
+ "BriefDescription": "Cryptographic instruction RFC02196 Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0aa",
+ "EventName": "PM_VSU1_DD_ISSUED",
+ "BriefDescription": "64BIT Decimal Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa08e",
+ "EventName": "PM_VSU1_DP_2FLOP",
+ "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa092",
+ "EventName": "PM_VSU1_DP_FMA",
+ "BriefDescription": "DP vector version of fmadd,fnmadd,fmsub,fnmsub",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa096",
+ "EventName": "PM_VSU1_DP_FSQRT_FDIV",
+ "BriefDescription": "DP vector versions of fdiv,fsqrt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0ae",
+ "EventName": "PM_VSU1_DQ_ISSUED",
+ "BriefDescription": "128BIT Decimal Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb0b2",
+ "EventName": "PM_VSU1_EX_ISSUED",
+ "BriefDescription": "Direct move 32/64b VRFtoGPR RFC02206 Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0be",
+ "EventName": "PM_VSU1_FIN",
+ "BriefDescription": "VSU1 Finished an instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa086",
+ "EventName": "PM_VSU1_FMA",
+ "BriefDescription": "two flops operation (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only!",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb09a",
+ "EventName": "PM_VSU1_FPSCR",
+ "BriefDescription": "Move to/from FPSCR type instruction issued on Pipe 0",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa08a",
+ "EventName": "PM_VSU1_FSQRT_FDIV",
+ "BriefDescription": "four flops operation (fdiv,fsqrt) Scalar Instructions only!",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb092",
+ "EventName": "PM_VSU1_PERMUTE_ISSUED",
+ "BriefDescription": "Permute VMX Instruction Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb08a",
+ "EventName": "PM_VSU1_SCALAR_DP_ISSUED",
+ "BriefDescription": "Double Precision scalar instruction issued on Pipe1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb096",
+ "EventName": "PM_VSU1_SIMPLE_ISSUED",
+ "BriefDescription": "Simple VMX instruction issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xa0aa",
+ "EventName": "PM_VSU1_SINGLE",
+ "BriefDescription": "FPU single precision",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb09e",
+ "EventName": "PM_VSU1_SQ",
+ "BriefDescription": "Store Vector Issued",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb08e",
+ "EventName": "PM_VSU1_STF",
+ "BriefDescription": "FPU store (SP or DP) issued on Pipe1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb082",
+ "EventName": "PM_VSU1_VECTOR_DP_ISSUED",
+ "BriefDescription": "Double Precision vector instruction issued on Pipe1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0xb086",
+ "EventName": "PM_VSU1_VECTOR_SP_ISSUED",
+ "BriefDescription": "Single Precision vector instruction issued (executed)",
+ "PublicDescription": ""
+ },
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json
new file mode 100644
index 000000000000..293f3a4c6901
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/pipeline.json
@@ -0,0 +1,350 @@
+[
+ {,
+ "EventCode": "0x100f2",
+ "EventName": "PM_1PLUS_PPC_CMPL",
+ "BriefDescription": "1 or more ppc insts finished",
+ "PublicDescription": "1 or more ppc insts finished (completed)"
+ },
+ {,
+ "EventCode": "0x400f2",
+ "EventName": "PM_1PLUS_PPC_DISP",
+ "BriefDescription": "Cycles at least one Instr Dispatched",
+ "PublicDescription": "Cycles at least one Instr Dispatched. Could be a group with only microcode. Issue HW016521"
+ },
+ {,
+ "EventCode": "0x100fa",
+ "EventName": "PM_ANY_THRD_RUN_CYC",
+ "BriefDescription": "One of threads in run_cycles",
+ "PublicDescription": "Any thread in run_cycles (was one thread in run_cycles)"
+ },
+ {,
+ "EventCode": "0x4000a",
+ "EventName": "PM_CMPLU_STALL",
+ "BriefDescription": "Completion stall",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d018",
+ "EventName": "PM_CMPLU_STALL_BRU",
+ "BriefDescription": "Completion stall due to a Branch Unit",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c012",
+ "EventName": "PM_CMPLU_STALL_DCACHE_MISS",
+ "BriefDescription": "Completion stall by Dcache miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c018",
+ "EventName": "PM_CMPLU_STALL_DMISS_L21_L31",
+ "BriefDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c016",
+ "EventName": "PM_CMPLU_STALL_DMISS_L2L3",
+ "BriefDescription": "Completion stall by Dcache miss which resolved in L2/L3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c016",
+ "EventName": "PM_CMPLU_STALL_DMISS_L2L3_CONFLICT",
+ "BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 with a conflict",
+ "PublicDescription": "Completion stall due to cache miss resolving in core's L2/L3 with a conflict"
+ },
+ {,
+ "EventCode": "0x4c01a",
+ "EventName": "PM_CMPLU_STALL_DMISS_L3MISS",
+ "BriefDescription": "Completion stall due to cache miss resolving missed the L3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c018",
+ "EventName": "PM_CMPLU_STALL_DMISS_LMEM",
+ "BriefDescription": "Completion stall due to cache miss that resolves in local memory",
+ "PublicDescription": "Completion stall due to cache miss resolving in core's Local Memory"
+ },
+ {,
+ "EventCode": "0x2c01c",
+ "EventName": "PM_CMPLU_STALL_DMISS_REMOTE",
+ "BriefDescription": "Completion stall by Dcache miss which resolved from remote chip (cache or memory)",
+ "PublicDescription": "Completion stall by Dcache miss which resolved on chip ( excluding local L2/L3)"
+ },
+ {,
+ "EventCode": "0x4c012",
+ "EventName": "PM_CMPLU_STALL_ERAT_MISS",
+ "BriefDescription": "Completion stall due to LSU reject ERAT miss",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d016",
+ "EventName": "PM_CMPLU_STALL_FXLONG",
+ "BriefDescription": "Completion stall due to a long latency fixed point instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2d016",
+ "EventName": "PM_CMPLU_STALL_FXU",
+ "BriefDescription": "Completion stall due to FXU",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30036",
+ "EventName": "PM_CMPLU_STALL_HWSYNC",
+ "BriefDescription": "completion stall due to hwsync",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4d014",
+ "EventName": "PM_CMPLU_STALL_LOAD_FINISH",
+ "BriefDescription": "Completion stall due to a Load finish",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c010",
+ "EventName": "PM_CMPLU_STALL_LSU",
+ "BriefDescription": "Completion stall by LSU instruction",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10036",
+ "EventName": "PM_CMPLU_STALL_LWSYNC",
+ "BriefDescription": "completion stall due to isync/lwsync",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30006",
+ "EventName": "PM_CMPLU_STALL_OTHER_CMPL",
+ "BriefDescription": "Instructions core completed while this tread was stalled",
+ "PublicDescription": "Instructions core completed while this thread was stalled"
+ },
+ {,
+ "EventCode": "0x4c01c",
+ "EventName": "PM_CMPLU_STALL_ST_FWD",
+ "BriefDescription": "Completion stall due to store forward",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1001c",
+ "EventName": "PM_CMPLU_STALL_THRD",
+ "BriefDescription": "Completion Stalled due to thread conflict. Group ready to complete but it was another thread's turn",
+ "PublicDescription": "Completion stall due to thread conflict"
+ },
+ {,
+ "EventCode": "0x1e",
+ "EventName": "PM_CYC",
+ "BriefDescription": "Cycles",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10006",
+ "EventName": "PM_DISP_HELD",
+ "BriefDescription": "Dispatch Held",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4003c",
+ "EventName": "PM_DISP_HELD_SYNC_HOLD",
+ "BriefDescription": "Dispatch held due to SYNC hold",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x200f8",
+ "EventName": "PM_EXT_INT",
+ "BriefDescription": "external interrupt",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x400f8",
+ "EventName": "PM_FLUSH",
+ "BriefDescription": "Flush (any type)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30012",
+ "EventName": "PM_FLUSH_COMPLETION",
+ "BriefDescription": "Completion Flush",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3000c",
+ "EventName": "PM_FREQ_DOWN",
+ "BriefDescription": "Power Management: Below Threshold B",
+ "PublicDescription": "Frequency is being slewed down due to Power Management"
+ },
+ {,
+ "EventCode": "0x4000c",
+ "EventName": "PM_FREQ_UP",
+ "BriefDescription": "Power Management: Above Threshold A",
+ "PublicDescription": "Frequency is being slewed up due to Power Management"
+ },
+ {,
+ "EventCode": "0x2000a",
+ "EventName": "PM_HV_CYC",
+ "BriefDescription": "Cycles in which msr_hv is high. Note that this event does not take msr_pr into consideration",
+ "PublicDescription": "cycles in hypervisor mode"
+ },
+ {,
+ "EventCode": "0x3405e",
+ "EventName": "PM_IFETCH_THROTTLE",
+ "BriefDescription": "Cycles in which Instruction fetch throttle was active",
+ "PublicDescription": "Cycles instruction fecth was throttled in IFU"
+ },
+ {,
+ "EventCode": "0x10014",
+ "EventName": "PM_IOPS_CMPL",
+ "BriefDescription": "Internal Operations completed",
+ "PublicDescription": "IOPS Completed"
+ },
+ {,
+ "EventCode": "0x3c058",
+ "EventName": "PM_LARX_FIN",
+ "BriefDescription": "Larx finished",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1002e",
+ "EventName": "PM_LD_CMPL",
+ "BriefDescription": "count of Loads completed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10062",
+ "EventName": "PM_LD_L3MISS_PEND_CYC",
+ "BriefDescription": "Cycles L3 miss was pending for this thread",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30066",
+ "EventName": "PM_LSU_FIN",
+ "BriefDescription": "LSU Finished an instruction (up to 2 per cycle)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2003e",
+ "EventName": "PM_LSU_LMQ_SRQ_EMPTY_CYC",
+ "BriefDescription": "LSU empty (lmq and srq empty)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e05c",
+ "EventName": "PM_LSU_REJECT_ERAT_MISS",
+ "BriefDescription": "LSU Reject due to ERAT (up to 4 per cycles)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e05c",
+ "EventName": "PM_LSU_REJECT_LHS",
+ "BriefDescription": "LSU Reject due to LHS (up to 4 per cycle)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e05c",
+ "EventName": "PM_LSU_REJECT_LMQ_FULL",
+ "BriefDescription": "LSU reject due to LMQ full ( 4 per cycle)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1001a",
+ "EventName": "PM_LSU_SRQ_FULL_CYC",
+ "BriefDescription": "Storage Queue is full and is blocking dispatch",
+ "PublicDescription": "SRQ is Full"
+ },
+ {,
+ "EventCode": "0x40014",
+ "EventName": "PM_PROBE_NOP_DISP",
+ "BriefDescription": "ProbeNops dispatched",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x600f4",
+ "EventName": "PM_RUN_CYC",
+ "BriefDescription": "Run_cycles",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3006c",
+ "EventName": "PM_RUN_CYC_SMT2_MODE",
+ "BriefDescription": "Cycles run latch is set and core is in SMT2 mode",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2006c",
+ "EventName": "PM_RUN_CYC_SMT4_MODE",
+ "BriefDescription": "cycles this threads run latch is set and the core is in SMT4 mode",
+ "PublicDescription": "Cycles run latch is set and core is in SMT4 mode"
+ },
+ {,
+ "EventCode": "0x1006c",
+ "EventName": "PM_RUN_CYC_ST_MODE",
+ "BriefDescription": "Cycles run latch is set and core is in ST mode",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x500fa",
+ "EventName": "PM_RUN_INST_CMPL",
+ "BriefDescription": "Run_Instructions",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e058",
+ "EventName": "PM_STCX_FAIL",
+ "BriefDescription": "stcx failed",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x20016",
+ "EventName": "PM_ST_CMPL",
+ "BriefDescription": "Store completion count",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x200f0",
+ "EventName": "PM_ST_FIN",
+ "BriefDescription": "Store Instructions Finished",
+ "PublicDescription": "Store Instructions Finished (store sent to nest)"
+ },
+ {,
+ "EventCode": "0x20018",
+ "EventName": "PM_ST_FWD",
+ "BriefDescription": "Store forwards that finished",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10026",
+ "EventName": "PM_TABLEWALK_CYC",
+ "BriefDescription": "Cycles when a tablewalk (I or D) is active",
+ "PublicDescription": "Tablewalk Active"
+ },
+ {,
+ "EventCode": "0x300f8",
+ "EventName": "PM_TB_BIT_TRANS",
+ "BriefDescription": "timebase event",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2000c",
+ "EventName": "PM_THRD_ALL_RUN_CYC",
+ "BriefDescription": "All Threads in Run_cycles (was both threads in run_cycles)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30058",
+ "EventName": "PM_TLBIE_FIN",
+ "BriefDescription": "tlbie finished",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10060",
+ "EventName": "PM_TM_TRANS_RUN_CYC",
+ "BriefDescription": "run cycles in transactional state",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e012",
+ "EventName": "PM_TM_TX_PASS_RUN_CYC",
+ "BriefDescription": "cycles spent in successful transactions",
+ "PublicDescription": "run cycles spent in successful transactions"
+ },
+]
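
A side note on the format: every entry in these power8 files carries the same four fields (EventCode, EventName, BriefDescription, PublicDescription), but the files use a relaxed JSON dialect, with objects opened as "{," and a trailing comma kept before the closing "]". The kernel's jevents tool copes with that; a strict JSON parser does not. A minimal loader sketch that normalises the layout first (illustrative only, not the kernel's tooling; the helper name is made up):

import json
import re
import sys

def load_relaxed(path):
    """Load one of the power8 event files, normalising the '{,' openers
    and trailing commas so that json.loads() accepts the text."""
    text = open(path).read()
    text = text.replace("{,", "{")
    text = re.sub(r",\s*([}\]])", r"\1", text)
    return json.loads(text)

if __name__ == "__main__":
    for entry in load_relaxed(sys.argv[1]):
        print(entry["EventCode"], entry["EventName"])
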
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/pmc.json b/tools/perf/pmu-events/arch/powerpc/power8/pmc.json
new file mode 100644
index 000000000000..583e4d937621
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/pmc.json
@@ -0,0 +1,140 @@
+[
+ {,
+ "EventCode": "0x20010",
+ "EventName": "PM_PMC1_OVERFLOW",
+ "BriefDescription": "Overflow from counter 1",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30010",
+ "EventName": "PM_PMC2_OVERFLOW",
+ "BriefDescription": "Overflow from counter 2",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30020",
+ "EventName": "PM_PMC2_REWIND",
+ "BriefDescription": "PMC2 Rewind Event (did not match condition)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10022",
+ "EventName": "PM_PMC2_SAVED",
+ "BriefDescription": "PMC2 Rewind Value saved",
+ "PublicDescription": "PMC2 Rewind Value saved (matched condition)"
+ },
+ {,
+ "EventCode": "0x40010",
+ "EventName": "PM_PMC3_OVERFLOW",
+ "BriefDescription": "Overflow from counter 3",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10010",
+ "EventName": "PM_PMC4_OVERFLOW",
+ "BriefDescription": "Overflow from counter 4",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10020",
+ "EventName": "PM_PMC4_REWIND",
+ "BriefDescription": "PMC4 Rewind Event",
+ "PublicDescription": "PMC4 Rewind Event (did not match condition)"
+ },
+ {,
+ "EventCode": "0x30022",
+ "EventName": "PM_PMC4_SAVED",
+ "BriefDescription": "PMC4 Rewind Value saved (matched condition)",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10024",
+ "EventName": "PM_PMC5_OVERFLOW",
+ "BriefDescription": "Overflow from counter 5",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x30024",
+ "EventName": "PM_PMC6_OVERFLOW",
+ "BriefDescription": "Overflow from counter 6",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x400f4",
+ "EventName": "PM_RUN_PURR",
+ "BriefDescription": "Run_PURR",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x10008",
+ "EventName": "PM_RUN_SPURR",
+ "BriefDescription": "Run SPURR",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x0",
+ "EventName": "PM_SUSPENDED",
+ "BriefDescription": "Counter OFF",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x301ea",
+ "EventName": "PM_THRESH_EXC_1024",
+ "BriefDescription": "Threshold counter exceeded a value of 1024",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x401ea",
+ "EventName": "PM_THRESH_EXC_128",
+ "BriefDescription": "Threshold counter exceeded a value of 128",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x401ec",
+ "EventName": "PM_THRESH_EXC_2048",
+ "BriefDescription": "Threshold counter exceeded a value of 2048",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x101e8",
+ "EventName": "PM_THRESH_EXC_256",
+ "BriefDescription": "Threshold counter exceed a count of 256",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x201e6",
+ "EventName": "PM_THRESH_EXC_32",
+ "BriefDescription": "Threshold counter exceeded a value of 32",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x101e6",
+ "EventName": "PM_THRESH_EXC_4096",
+ "BriefDescription": "Threshold counter exceed a count of 4096",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x201e8",
+ "EventName": "PM_THRESH_EXC_512",
+ "BriefDescription": "Threshold counter exceeded a value of 512",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x301e8",
+ "EventName": "PM_THRESH_EXC_64",
+ "BriefDescription": "IFU non-branch finished",
+ "PublicDescription": "Threshold counter exceeded a value of 64"
+ },
+ {,
+ "EventCode": "0x101ec",
+ "EventName": "PM_THRESH_MET",
+ "BriefDescription": "threshold exceeded",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4016e",
+ "EventName": "PM_THRESH_NOT_MET",
+ "BriefDescription": "Threshold counter did not meet threshold",
+ "PublicDescription": ""
+ },
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/translation.json b/tools/perf/pmu-events/arch/powerpc/power8/translation.json
new file mode 100644
index 000000000000..e47a55459bc8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power8/translation.json
@@ -0,0 +1,176 @@
+[
+ {,
+ "EventCode": "0x4c054",
+ "EventName": "PM_DERAT_MISS_16G",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16G",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3c054",
+ "EventName": "PM_DERAT_MISS_16M",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1c056",
+ "EventName": "PM_DERAT_MISS_4K",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c054",
+ "EventName": "PM_DERAT_MISS_64K",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e048",
+ "EventName": "PM_DPTEG_FROM_DL2L3_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e048",
+ "EventName": "PM_DPTEG_FROM_DL2L3_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on a different Node or Group (Distant), as this chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e042",
+ "EventName": "PM_DPTEG_FROM_L2",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e04e",
+ "EventName": "PM_DPTEG_FROM_L2MISS",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L2 due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e040",
+ "EventName": "PM_DPTEG_FROM_L2_MEPF",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 hit without dispatch conflicts on Mepf state. due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e040",
+ "EventName": "PM_DPTEG_FROM_L2_NO_CONFLICT",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L2 without conflict due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e042",
+ "EventName": "PM_DPTEG_FROM_L3",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3e042",
+ "EventName": "PM_DPTEG_FROM_L3_DISP_CONFLICT",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 with dispatch conflict due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e042",
+ "EventName": "PM_DPTEG_FROM_L3_MEPF",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without dispatch conflicts hit on Mepf state. due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e044",
+ "EventName": "PM_DPTEG_FROM_L3_NO_CONFLICT",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from local core's L3 without conflict due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e04c",
+ "EventName": "PM_DPTEG_FROM_LL4",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's L4 cache due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e048",
+ "EventName": "PM_DPTEG_FROM_LMEM",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from the local chip's Memory due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e04c",
+ "EventName": "PM_DPTEG_FROM_MEMORY",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a memory location including L4 from local remote or distant due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4e04a",
+ "EventName": "PM_DPTEG_FROM_OFF_CHIP_CACHE",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on a different chip (remote or distant) due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e048",
+ "EventName": "PM_DPTEG_FROM_ON_CHIP_CACHE",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB either shared or modified data from another core's L2/L3 on the same chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e046",
+ "EventName": "PM_DPTEG_FROM_RL2L3_MOD",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Modified (M) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x1e04a",
+ "EventName": "PM_DPTEG_FROM_RL2L3_SHR",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB with Shared (S) data from another chip's L2 or L3 on the same Node or Group (Remote), as this chip due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2e04a",
+ "EventName": "PM_DPTEG_FROM_RL4",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from another chip's L4 on the same Node or Group ( Remote) due to a data side request",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x300fc",
+ "EventName": "PM_DTLB_MISS",
+ "BriefDescription": "Data PTEG reload",
+ "PublicDescription": "Data PTEG Reloaded (DTLB Miss)"
+ },
+ {,
+ "EventCode": "0x1c058",
+ "EventName": "PM_DTLB_MISS_16G",
+ "BriefDescription": "Data TLB Miss page size 16G",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x4c056",
+ "EventName": "PM_DTLB_MISS_16M",
+ "BriefDescription": "Data TLB Miss page size 16M",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x2c056",
+ "EventName": "PM_DTLB_MISS_4K",
+ "BriefDescription": "Data TLB Miss page size 4k",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x3c056",
+ "EventName": "PM_DTLB_MISS_64K",
+ "BriefDescription": "Data TLB Miss page size 64K",
+ "PublicDescription": ""
+ },
+ {,
+ "EventCode": "0x200f6",
+ "EventName": "PM_LSU_DERAT_MISS",
+ "BriefDescription": "DERAT Reloaded due to a DERAT miss",
+ "PublicDescription": "DERAT Reloaded (Miss)"
+ },
+ {,
+ "EventCode": "0x20066",
+ "EventName": "PM_TLB_MISS",
+ "BriefDescription": "TLB Miss (I + D)",
+ "PublicDescription": ""
+ },
+]
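
Once jevents turns these JSON files into tables at build time, the events should become selectable by name, for example something along the lines of perf stat -e pm_dtlb_miss,pm_run_inst_cmpl (the exact alias spelling depends on the jevents flow, so treat that as an assumption). Derived numbers then fall out of pairs of events added by this patch, e.g. DTLB misses per thousand completed run instructions from PM_DTLB_MISS and PM_RUN_INST_CMPL. A small illustrative sketch with placeholder counts:

def dtlb_mpki(pm_dtlb_miss, pm_run_inst_cmpl):
    """DTLB misses per 1000 completed run instructions; the two counts
    would come from an actual perf run, the values below are made up."""
    return 1000.0 * pm_dtlb_miss / pm_run_inst_cmpl

print(dtlb_mpki(pm_dtlb_miss=12345, pm_run_inst_cmpl=9876543))
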
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/cache.json b/tools/perf/pmu-events/arch/x86/bonnell/cache.json
new file mode 100644
index 000000000000..ffab90c5891c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/bonnell/cache.json
@@ -0,0 +1,746 @@
+[
+ {
+ "EventCode": "0x21",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "L2_ADS.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Cycles L2 address bus is in use."
+ },
+ {
+ "EventCode": "0x22",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "L2_DBUS_BUSY.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Cycles the L2 cache data bus is busy."
+ },
+ {
+ "EventCode": "0x23",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "L2_DBUS_BUSY_RD.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Cycles the L2 transfers data to the core."
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1",
+ "UMask": "0x70",
+ "EventName": "L2_LINES_IN.SELF.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache misses."
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "L2_LINES_IN.SELF.DEMAND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache misses."
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1",
+ "UMask": "0x50",
+ "EventName": "L2_LINES_IN.SELF.PREFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache misses."
+ },
+ {
+ "EventCode": "0x25",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "L2_M_LINES_IN.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache line modifications."
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1",
+ "UMask": "0x70",
+ "EventName": "L2_LINES_OUT.SELF.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache lines evicted."
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "L2_LINES_OUT.SELF.DEMAND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache lines evicted."
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1",
+ "UMask": "0x50",
+ "EventName": "L2_LINES_OUT.SELF.PREFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache lines evicted."
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1",
+ "UMask": "0x70",
+ "EventName": "L2_M_LINES_OUT.SELF.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Modified lines evicted from the L2 cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "L2_M_LINES_OUT.SELF.DEMAND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Modified lines evicted from the L2 cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1",
+ "UMask": "0x50",
+ "EventName": "L2_M_LINES_OUT.SELF.PREFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Modified lines evicted from the L2 cache"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1",
+ "UMask": "0x44",
+ "EventName": "L2_IFETCH.SELF.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cacheable instruction fetch requests"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "L2_IFETCH.SELF.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cacheable instruction fetch requests"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1",
+ "UMask": "0x48",
+ "EventName": "L2_IFETCH.SELF.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cacheable instruction fetch requests"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1",
+ "UMask": "0x42",
+ "EventName": "L2_IFETCH.SELF.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cacheable instruction fetch requests"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "L2_IFETCH.SELF.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cacheable instruction fetch requests"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x74",
+ "EventName": "L2_LD.SELF.ANY.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x71",
+ "EventName": "L2_LD.SELF.ANY.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x78",
+ "EventName": "L2_LD.SELF.ANY.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x72",
+ "EventName": "L2_LD.SELF.ANY.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x7f",
+ "EventName": "L2_LD.SELF.ANY.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x44",
+ "EventName": "L2_LD.SELF.DEMAND.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "L2_LD.SELF.DEMAND.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x48",
+ "EventName": "L2_LD.SELF.DEMAND.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x42",
+ "EventName": "L2_LD.SELF.DEMAND.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "L2_LD.SELF.DEMAND.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x54",
+ "EventName": "L2_LD.SELF.PREFETCH.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x51",
+ "EventName": "L2_LD.SELF.PREFETCH.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x58",
+ "EventName": "L2_LD.SELF.PREFETCH.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x52",
+ "EventName": "L2_LD.SELF.PREFETCH.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x29",
+ "Counter": "0,1",
+ "UMask": "0x5f",
+ "EventName": "L2_LD.SELF.PREFETCH.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache reads"
+ },
+ {
+ "EventCode": "0x2A",
+ "Counter": "0,1",
+ "UMask": "0x44",
+ "EventName": "L2_ST.SELF.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 store requests"
+ },
+ {
+ "EventCode": "0x2A",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "L2_ST.SELF.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 store requests"
+ },
+ {
+ "EventCode": "0x2A",
+ "Counter": "0,1",
+ "UMask": "0x48",
+ "EventName": "L2_ST.SELF.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 store requests"
+ },
+ {
+ "EventCode": "0x2A",
+ "Counter": "0,1",
+ "UMask": "0x42",
+ "EventName": "L2_ST.SELF.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 store requests"
+ },
+ {
+ "EventCode": "0x2A",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "L2_ST.SELF.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 store requests"
+ },
+ {
+ "EventCode": "0x2B",
+ "Counter": "0,1",
+ "UMask": "0x44",
+ "EventName": "L2_LOCK.SELF.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 locked accesses"
+ },
+ {
+ "EventCode": "0x2B",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "L2_LOCK.SELF.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 locked accesses"
+ },
+ {
+ "EventCode": "0x2B",
+ "Counter": "0,1",
+ "UMask": "0x48",
+ "EventName": "L2_LOCK.SELF.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 locked accesses"
+ },
+ {
+ "EventCode": "0x2B",
+ "Counter": "0,1",
+ "UMask": "0x42",
+ "EventName": "L2_LOCK.SELF.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 locked accesses"
+ },
+ {
+ "EventCode": "0x2B",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "L2_LOCK.SELF.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 locked accesses"
+ },
+ {
+ "EventCode": "0x2C",
+ "Counter": "0,1",
+ "UMask": "0x44",
+ "EventName": "L2_DATA_RQSTS.SELF.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All data requests from the L1 data cache"
+ },
+ {
+ "EventCode": "0x2C",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "L2_DATA_RQSTS.SELF.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All data requests from the L1 data cache"
+ },
+ {
+ "EventCode": "0x2C",
+ "Counter": "0,1",
+ "UMask": "0x48",
+ "EventName": "L2_DATA_RQSTS.SELF.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All data requests from the L1 data cache"
+ },
+ {
+ "EventCode": "0x2C",
+ "Counter": "0,1",
+ "UMask": "0x42",
+ "EventName": "L2_DATA_RQSTS.SELF.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All data requests from the L1 data cache"
+ },
+ {
+ "EventCode": "0x2C",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "L2_DATA_RQSTS.SELF.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All data requests from the L1 data cache"
+ },
+ {
+ "EventCode": "0x2D",
+ "Counter": "0,1",
+ "UMask": "0x44",
+ "EventName": "L2_LD_IFETCH.SELF.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All read requests from L1 instruction and data caches"
+ },
+ {
+ "EventCode": "0x2D",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "L2_LD_IFETCH.SELF.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All read requests from L1 instruction and data caches"
+ },
+ {
+ "EventCode": "0x2D",
+ "Counter": "0,1",
+ "UMask": "0x48",
+ "EventName": "L2_LD_IFETCH.SELF.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All read requests from L1 instruction and data caches"
+ },
+ {
+ "EventCode": "0x2D",
+ "Counter": "0,1",
+ "UMask": "0x42",
+ "EventName": "L2_LD_IFETCH.SELF.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All read requests from L1 instruction and data caches"
+ },
+ {
+ "EventCode": "0x2D",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "L2_LD_IFETCH.SELF.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All read requests from L1 instruction and data caches"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x74",
+ "EventName": "L2_RQSTS.SELF.ANY.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x71",
+ "EventName": "L2_RQSTS.SELF.ANY.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x78",
+ "EventName": "L2_RQSTS.SELF.ANY.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x72",
+ "EventName": "L2_RQSTS.SELF.ANY.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x7f",
+ "EventName": "L2_RQSTS.SELF.ANY.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.SELF.DEMAND.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x48",
+ "EventName": "L2_RQSTS.SELF.DEMAND.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.SELF.DEMAND.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x54",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x51",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x58",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x52",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x5f",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "L2_RQSTS.SELF.DEMAND.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache demand requests from this core that missed the L2"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "L2_RQSTS.SELF.DEMAND.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 cache demand requests from this core"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x74",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x71",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x78",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x72",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x7f",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x44",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x48",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x42",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x54",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x51",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x58",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x52",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x5f",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Rejected L2 cache requests"
+ },
+ {
+ "EventCode": "0x32",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "L2_NO_REQ.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Cycles no L2 cache requests are pending"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0xa1",
+ "EventName": "L1D_CACHE.LD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 Cacheable Data Reads"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0xa2",
+ "EventName": "L1D_CACHE.ST",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 Cacheable Data Writes"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x83",
+ "EventName": "L1D_CACHE.ALL_REF",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 Data reads and writes"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0xa3",
+ "EventName": "L1D_CACHE.ALL_CACHE_REF",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 Data Cacheable reads and writes"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D_CACHE.REPL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1 Data line replacements"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x48",
+ "EventName": "L1D_CACHE.REPLM",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Modified cache lines allocated in the L1 data cache"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "L1D_CACHE.EVICT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Modified cache lines evicted from the L1 data cache"
+ },
+ {
+ "EventCode": "0xCB",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that hit the L2 cache (precise event)."
+ },
+ {
+ "EventCode": "0xCB",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Retired loads that miss the L2 cache"
+ }
+] \ No newline at end of file
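The Bonnell entries carry the usual x86 pair of EventCode and UMask, which map onto bits 0-7 and 8-15 of the PERFEVTSEL encoding, so an entry such as L2_RQSTS.SELF.DEMAND.I_STATE above (EventCode 0x2E, UMask 0x41) corresponds to a raw config of 0x412e, the same event perf accepts as cpu/event=0x2E,umask=0x41/. A tiny illustrative helper, not part of the patch:

def raw_config(event_code, umask):
    """Conventional x86 encoding: unit mask in bits 8-15, event select
    in bits 0-7."""
    return ((umask & 0xff) << 8) | (event_code & 0xff)

assert raw_config(0x2E, 0x41) == 0x412e
print(hex(raw_config(0x2E, 0x41)))
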
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json b/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
new file mode 100644
index 000000000000..f0e090cdb9f0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
@@ -0,0 +1,261 @@
+[
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "X87_COMP_OPS_EXE.ANY.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Floating point computational micro-ops executed."
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0x10",
+ "Counter": "0,1",
+ "UMask": "0x81",
+ "EventName": "X87_COMP_OPS_EXE.ANY.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Floating point computational micro-ops retired."
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "X87_COMP_OPS_EXE.FXCH.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "FXCH uops executed."
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0x10",
+ "Counter": "0,1",
+ "UMask": "0x82",
+ "EventName": "X87_COMP_OPS_EXE.FXCH.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "FXCH uops retired."
+ },
+ {
+ "EventCode": "0x11",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "FP_ASSIST.S",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Floating point assists."
+ },
+ {
+ "EventCode": "0x11",
+ "Counter": "0,1",
+ "UMask": "0x81",
+ "EventName": "FP_ASSIST.AR",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Floating point assists for retired operations."
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "SIMD_UOPS_EXEC.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD micro-ops executed (excluding stores)."
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB0",
+ "Counter": "0,1",
+ "UMask": "0x80",
+ "EventName": "SIMD_UOPS_EXEC.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD micro-ops retired (excluding stores)."
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "SIMD_SAT_UOP_EXEC.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD saturated arithmetic micro-ops executed."
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1",
+ "UMask": "0x80",
+ "EventName": "SIMD_SAT_UOP_EXEC.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD saturated arithmetic micro-ops retired."
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "SIMD_UOP_TYPE_EXEC.MUL.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed multiply micro-ops executed"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x81",
+ "EventName": "SIMD_UOP_TYPE_EXEC.MUL.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed multiply micro-ops retired"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "SIMD_UOP_TYPE_EXEC.SHIFT.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed shift micro-ops executed"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x82",
+ "EventName": "SIMD_UOP_TYPE_EXEC.SHIFT.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed shift micro-ops retired"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "SIMD_UOP_TYPE_EXEC.PACK.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed micro-ops executed"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x84",
+ "EventName": "SIMD_UOP_TYPE_EXEC.PACK.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed micro-ops retired"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "SIMD_UOP_TYPE_EXEC.UNPACK.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD unpacked micro-ops executed"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x88",
+ "EventName": "SIMD_UOP_TYPE_EXEC.UNPACK.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD unpacked micro-ops retired"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "SIMD_UOP_TYPE_EXEC.LOGICAL.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed logical micro-ops executed"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x90",
+ "EventName": "SIMD_UOP_TYPE_EXEC.LOGICAL.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed logical micro-ops retired"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "SIMD_UOP_TYPE_EXEC.ARITHMETIC.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed arithmetic micro-ops executed"
+ },
+ {
+ "EventCode": "0xB3",
+ "Counter": "0,1",
+ "UMask": "0xa0",
+ "EventName": "SIMD_UOP_TYPE_EXEC.ARITHMETIC.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD packed arithmetic micro-ops retired"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "SIMD_INST_RETIRED.PACKED_SINGLE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired Streaming SIMD Extensions (SSE) packed-single instructions."
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "SIMD_INST_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired Streaming SIMD Extensions (SSE) scalar-single instructions."
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "SIMD_INST_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired Streaming SIMD Extensions 2 (SSE2) scalar-double instructions."
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "SIMD_INST_RETIRED.VECTOR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired Streaming SIMD Extensions 2 (SSE2) vector instructions."
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "SIMD_COMP_INST_RETIRED.PACKED_SINGLE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired computational Streaming SIMD Extensions (SSE) packed-single instructions."
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "SIMD_COMP_INST_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired computational Streaming SIMD Extensions (SSE) scalar-single instructions."
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "SIMD_COMP_INST_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired computational Streaming SIMD Extensions 2 (SSE2) scalar-double instructions."
+ },
+ {
+ "EventCode": "0xCD",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "SIMD_ASSIST",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "SIMD assists invoked."
+ },
+ {
+ "EventCode": "0xCE",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "SIMD_INSTR_RETIRED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SIMD Instructions retired."
+ },
+ {
+ "EventCode": "0xCF",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "SIMD_SAT_INSTR_RETIRED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Saturated arithmetic instructions retired."
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
new file mode 100644
index 000000000000..935b7dcf067d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
@@ -0,0 +1,83 @@
+[
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Instruction fetches."
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Icache hit"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Icache miss"
+ },
+ {
+ "EventCode": "0x86",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CYCLES_ICACHE_MEM_STALLED.ICACHE_MEM_STALLED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles during which instruction fetches are stalled."
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "DECODE_STALL.PFB_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Decode stall due to PFB empty"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "DECODE_STALL.IQ_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Decode stall due to IQ full"
+ },
+ {
+ "EventCode": "0xAA",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.NON_CISC_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Non-CISC nacro instructions decoded"
+ },
+ {
+ "EventCode": "0xAA",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "MACRO_INSTS.CISC_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "CISC macro instructions decoded"
+ },
+ {
+ "EventCode": "0xAA",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "MACRO_INSTS.ALL_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All Instructions decoded"
+ },
+ {
+ "EventCode": "0xA9",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "UOPS.MS_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ. ",
+ "CounterMask": "1"
+ }
+] \ No newline at end of file
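frontend.json also introduces CounterMask, used here on UOPS.MS_CYCLES (EventCode 0xA9, UMask 0x1, CounterMask 1) to count cycles in which at least one MS uop was issued. In the conventional PERFEVTSEL layout the counter mask occupies bits 24-31, so the encoding helper sketched earlier extends naturally (still an illustration based on that assumption, not something this patch defines):

def raw_config_cmask(event_code, umask, cmask=0):
    """Event select in bits 0-7, unit mask in bits 8-15, counter mask
    (threshold) in bits 24-31."""
    return ((cmask & 0xff) << 24) | ((umask & 0xff) << 8) | (event_code & 0xff)

print(hex(raw_config_cmask(0xA9, 0x1, cmask=1)))  # 0x10001a9
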
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/memory.json b/tools/perf/pmu-events/arch/x86/bonnell/memory.json
new file mode 100644
index 000000000000..3ae843b20c8a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/bonnell/memory.json
@@ -0,0 +1,154 @@
+[
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0xf",
+ "EventName": "MISALIGN_MEM_REF.SPLIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Memory references that cross an 8-byte boundary."
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0x9",
+ "EventName": "MISALIGN_MEM_REF.LD_SPLIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Load splits"
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0xa",
+ "EventName": "MISALIGN_MEM_REF.ST_SPLIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Store splits"
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0x8f",
+ "EventName": "MISALIGN_MEM_REF.SPLIT.AR",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Memory references that cross an 8-byte boundary (At Retirement)"
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0x89",
+ "EventName": "MISALIGN_MEM_REF.LD_SPLIT.AR",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Load splits (At Retirement)"
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0x8a",
+ "EventName": "MISALIGN_MEM_REF.ST_SPLIT.AR",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Store splits (Ar Retirement)"
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0x8c",
+ "EventName": "MISALIGN_MEM_REF.RMW_SPLIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ld-op-st splits"
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0x97",
+ "EventName": "MISALIGN_MEM_REF.BUBBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Nonzero segbase 1 bubble"
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0x91",
+ "EventName": "MISALIGN_MEM_REF.LD_BUBBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Nonzero segbase load 1 bubble"
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0x92",
+ "EventName": "MISALIGN_MEM_REF.ST_BUBBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Nonzero segbase store 1 bubble"
+ },
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1",
+ "UMask": "0x94",
+ "EventName": "MISALIGN_MEM_REF.RMW_BUBBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Nonzero segbase ld-op-st 1 bubble"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1",
+ "UMask": "0x81",
+ "EventName": "PREFETCH.PREFETCHT0",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT0 instructions executed."
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1",
+ "UMask": "0x82",
+ "EventName": "PREFETCH.PREFETCHT1",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT1 instructions executed."
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1",
+ "UMask": "0x84",
+ "EventName": "PREFETCH.PREFETCHT2",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT2 instructions executed."
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1",
+ "UMask": "0x86",
+ "EventName": "PREFETCH.SW_L2",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT1 and PrefetchT2 instructions executed"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1",
+ "UMask": "0x88",
+ "EventName": "PREFETCH.PREFETCHNTA",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Streaming SIMD Extensions (SSE) Prefetch NTA instructions executed"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "PREFETCH.HW_PREFETCH",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 hardware prefetch request"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1",
+ "UMask": "0xf",
+ "EventName": "PREFETCH.SOFTWARE_PREFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Any Software prefetch"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1",
+ "UMask": "0x8f",
+ "EventName": "PREFETCH.SOFTWARE_PREFETCH.AR",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Any Software prefetch"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/other.json b/tools/perf/pmu-events/arch/x86/bonnell/other.json
new file mode 100644
index 000000000000..4bc1c582d1cd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/bonnell/other.json
@@ -0,0 +1,450 @@
+[
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1",
+ "UMask": "0x80",
+ "EventName": "SEGMENT_REG_LOADS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of segment register loads."
+ },
+ {
+ "EventCode": "0x9",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "DISPATCH_BLOCKED.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Memory cluster signals to block micro-op dispatch for any reason"
+ },
+ {
+ "EventCode": "0x3A",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "EIST_TRANS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions"
+ },
+ {
+ "EventCode": "0x3B",
+ "Counter": "0,1",
+ "UMask": "0xc0",
+ "EventName": "THERMAL_TRIP",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of thermal trips"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_REQUEST_OUTSTANDING.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Outstanding cacheable data read bus requests duration."
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_REQUEST_OUTSTANDING.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Outstanding cacheable data read bus requests duration."
+ },
+ {
+ "EventCode": "0x61",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "BUS_BNR_DRV.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of Bus Not Ready signals asserted."
+ },
+ {
+ "EventCode": "0x61",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BUS_BNR_DRV.THIS_AGENT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of Bus Not Ready signals asserted."
+ },
+ {
+ "EventCode": "0x62",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "BUS_DRDY_CLOCKS.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Bus cycles when data is sent on the bus."
+ },
+ {
+ "EventCode": "0x62",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BUS_DRDY_CLOCKS.THIS_AGENT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Bus cycles when data is sent on the bus."
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_LOCK_CLOCKS.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Bus cycles when a LOCK signal is asserted."
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_LOCK_CLOCKS.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Bus cycles when a LOCK signal is asserted."
+ },
+ {
+ "EventCode": "0x64",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_DATA_RCV.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Bus cycles while processor receives data."
+ },
+ {
+ "EventCode": "0x65",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_BRD.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Burst read bus transactions."
+ },
+ {
+ "EventCode": "0x65",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_BRD.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Burst read bus transactions."
+ },
+ {
+ "EventCode": "0x66",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_RFO.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "RFO bus transactions."
+ },
+ {
+ "EventCode": "0x66",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_RFO.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "RFO bus transactions."
+ },
+ {
+ "EventCode": "0x67",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_WB.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Explicit writeback bus transactions."
+ },
+ {
+ "EventCode": "0x67",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_WB.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Explicit writeback bus transactions."
+ },
+ {
+ "EventCode": "0x68",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_IFETCH.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Instruction-fetch bus transactions."
+ },
+ {
+ "EventCode": "0x68",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_IFETCH.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Instruction-fetch bus transactions."
+ },
+ {
+ "EventCode": "0x69",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_INVAL.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Invalidate bus transactions."
+ },
+ {
+ "EventCode": "0x69",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_INVAL.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Invalidate bus transactions."
+ },
+ {
+ "EventCode": "0x6A",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_PWR.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Partial write bus transaction."
+ },
+ {
+ "EventCode": "0x6A",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_PWR.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Partial write bus transaction."
+ },
+ {
+ "EventCode": "0x6B",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_P.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Partial bus transactions."
+ },
+ {
+ "EventCode": "0x6B",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_P.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Partial bus transactions."
+ },
+ {
+ "EventCode": "0x6C",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_IO.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "IO bus transactions."
+ },
+ {
+ "EventCode": "0x6C",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_IO.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "IO bus transactions."
+ },
+ {
+ "EventCode": "0x6D",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_DEF.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Deferred bus transactions."
+ },
+ {
+ "EventCode": "0x6D",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_DEF.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Deferred bus transactions."
+ },
+ {
+ "EventCode": "0x6E",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_BURST.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Burst (full cache-line) bus transactions."
+ },
+ {
+ "EventCode": "0x6E",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_BURST.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Burst (full cache-line) bus transactions."
+ },
+ {
+ "EventCode": "0x6F",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_MEM.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Memory bus transactions."
+ },
+ {
+ "EventCode": "0x6F",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_MEM.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Memory bus transactions."
+ },
+ {
+ "EventCode": "0x70",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "BUS_TRANS_ANY.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All bus transactions."
+ },
+ {
+ "EventCode": "0x70",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_TRANS_ANY.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All bus transactions."
+ },
+ {
+ "EventCode": "0x77",
+ "Counter": "0,1",
+ "UMask": "0xb",
+ "EventName": "EXT_SNOOP.THIS_AGENT.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "External snoops."
+ },
+ {
+ "EventCode": "0x77",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "EXT_SNOOP.THIS_AGENT.CLEAN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "External snoops."
+ },
+ {
+ "EventCode": "0x77",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "EXT_SNOOP.THIS_AGENT.HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "External snoops."
+ },
+ {
+ "EventCode": "0x77",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "EXT_SNOOP.THIS_AGENT.HITM",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "External snoops."
+ },
+ {
+ "EventCode": "0x77",
+ "Counter": "0,1",
+ "UMask": "0x2b",
+ "EventName": "EXT_SNOOP.ALL_AGENTS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "External snoops."
+ },
+ {
+ "EventCode": "0x77",
+ "Counter": "0,1",
+ "UMask": "0x21",
+ "EventName": "EXT_SNOOP.ALL_AGENTS.CLEAN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "External snoops."
+ },
+ {
+ "EventCode": "0x77",
+ "Counter": "0,1",
+ "UMask": "0x22",
+ "EventName": "EXT_SNOOP.ALL_AGENTS.HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "External snoops."
+ },
+ {
+ "EventCode": "0x77",
+ "Counter": "0,1",
+ "UMask": "0x28",
+ "EventName": "EXT_SNOOP.ALL_AGENTS.HITM",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "External snoops."
+ },
+ {
+ "EventCode": "0x7A",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "BUS_HIT_DRV.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "HIT signal asserted."
+ },
+ {
+ "EventCode": "0x7A",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BUS_HIT_DRV.THIS_AGENT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "HIT signal asserted."
+ },
+ {
+ "EventCode": "0x7B",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "BUS_HITM_DRV.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "HITM signal asserted."
+ },
+ {
+ "EventCode": "0x7B",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BUS_HITM_DRV.THIS_AGENT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "HITM signal asserted."
+ },
+ {
+ "EventCode": "0x7D",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUSQ_EMPTY.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Bus queue is empty."
+ },
+ {
+ "EventCode": "0x7E",
+ "Counter": "0,1",
+ "UMask": "0xe0",
+ "EventName": "SNOOP_STALL_DRV.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Bus stalled for snoops."
+ },
+ {
+ "EventCode": "0x7E",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "SNOOP_STALL_DRV.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Bus stalled for snoops."
+ },
+ {
+ "EventCode": "0x7F",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "BUS_IO_WAIT.SELF",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "IO requests waiting in the bus queue."
+ },
+ {
+ "EventCode": "0xC6",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CYCLES_INT_MASKED.CYCLES_INT_MASKED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles during which interrupts are disabled."
+ },
+ {
+ "EventCode": "0xC6",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "CYCLES_INT_MASKED.CYCLES_INT_PENDING_AND_MASKED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles during which interrupts are pending and disabled."
+ },
+ {
+ "EventCode": "0xC8",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "HW_INT_RCV",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Hardware interrupts received."
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
new file mode 100644
index 000000000000..b2e681c78466
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
@@ -0,0 +1,364 @@
+[
+ {
+ "EventCode": "0x2",
+ "Counter": "0,1",
+ "UMask": "0x83",
+ "EventName": "STORE_FORWARDS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All store forwards"
+ },
+ {
+ "EventCode": "0x2",
+ "Counter": "0,1",
+ "UMask": "0x81",
+ "EventName": "STORE_FORWARDS.GOOD",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Good store forwards"
+ },
+ {
+ "EventCode": "0x3",
+ "Counter": "0,1",
+ "UMask": "0x7f",
+ "EventName": "REISSUE.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Micro-op reissues for any cause"
+ },
+ {
+ "EventCode": "0x3",
+ "Counter": "0,1",
+ "UMask": "0xff",
+ "EventName": "REISSUE.ANY.AR",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Micro-op reissues for any cause (At Retirement)"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MUL.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Multiply operations executed."
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1",
+ "UMask": "0x81",
+ "EventName": "MUL.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Multiply operations retired"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "DIV.S",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Divide operations executed."
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1",
+ "UMask": "0x81",
+ "EventName": "DIV.AR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Divide operations retired"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CYCLES_DIV_BUSY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles the divider is busy."
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Core cycles when core is not halted"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.BUS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Bus cycles when core is not halted"
+ },
+ {
+ "EventCode": "0xA",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Core cycles when core is not halted"
+ },
+ {
+ "EventCode": "0xA",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reference cycles when core is not halted."
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "BR_INST_TYPE_RETIRED.COND",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All macro conditional branch instructions."
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "BR_INST_TYPE_RETIRED.UNCOND",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All macro unconditional branch instructions, excluding calls and indirects"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "BR_INST_TYPE_RETIRED.IND",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All indirect branches that are not calls."
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "BR_INST_TYPE_RETIRED.RET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All indirect branches that have a return mnemonic"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "BR_INST_TYPE_RETIRED.DIR_CALL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All non-indirect calls"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "BR_INST_TYPE_RETIRED.IND_CALL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All indirect calls, including both register and memory indirect."
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "BR_INST_TYPE_RETIRED.COND_TAKEN",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Only taken macro conditional branch instructions"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "BR_MISSP_TYPE_RETIRED.COND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Mispredicted cond branch instructions retired"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "BR_MISSP_TYPE_RETIRED.IND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Mispredicted ind branches that are not calls"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "BR_MISSP_TYPE_RETIRED.RETURN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Mispredicted return branches"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "BR_MISSP_TYPE_RETIRED.IND_CALL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect. "
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1",
+ "UMask": "0x11",
+ "EventName": "BR_MISSP_TYPE_RETIRED.COND_TAKEN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Mispredicted and taken cond branch instructions retired"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (precise event)."
+ },
+ {
+ "EventCode": "0xA",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired."
+ },
+ {
+ "EventCode": "0xC2",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "UOPS_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Micro-ops retired."
+ },
+ {
+ "EventCode": "0xC2",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "UOPS_RETIRED.STALLED_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no micro-ops retired."
+ },
+ {
+ "EventCode": "0xC2",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "UOPS_RETIRED.STALLS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Periods no micro-ops retired."
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Self-Modifying Code detected."
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired branch instructions."
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.PRED_NOT_TAKEN",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired branch instructions that were predicted not-taken."
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.MISPRED_NOT_TAKEN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired branch instructions that were mispredicted not-taken."
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.PRED_TAKEN",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired branch instructions that were predicted taken."
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "BR_INST_RETIRED.MISPRED_TAKEN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired branch instructions that were mispredicted taken."
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xc",
+ "EventName": "BR_INST_RETIRED.TAKEN",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired taken branch instructions."
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xf",
+ "EventName": "BR_INST_RETIRED.ANY1",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired branch instructions."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.MISPRED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired mispredicted branch instructions (precise event)."
+ },
+ {
+ "EventCode": "0xDC",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "RESOURCE_STALLS.DIV_BUSY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles issue is stalled due to div busy."
+ },
+ {
+ "EventCode": "0xE0",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "BR_INST_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch instructions decoded"
+ },
+ {
+ "EventCode": "0xE4",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "BOGUS_BR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Bogus branches"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEARS asserted."
+ },
+ {
+ "EventCode": "0x3",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "REISSUE.OVERLAP_STORE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Micro-op reissues on a store-load collision"
+ },
+ {
+ "EventCode": "0x3",
+ "Counter": "0,1",
+ "UMask": "0x81",
+ "EventName": "REISSUE.OVERLAP_STORE.AR",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Micro-op reissues on a store-load collision (At Retirement)"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
new file mode 100644
index 000000000000..7bb817588721
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
@@ -0,0 +1,124 @@
+[
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1",
+ "UMask": "0x7",
+ "EventName": "DATA_TLB_MISSES.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Memory accesses that missed the DTLB."
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1",
+ "UMask": "0x5",
+ "EventName": "DATA_TLB_MISSES.DTLB_MISS_LD",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB misses due to load operations."
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1",
+ "UMask": "0x9",
+ "EventName": "DATA_TLB_MISSES.L0_DTLB_MISS_LD",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L0 DTLB misses due to load operations."
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1",
+ "UMask": "0x6",
+ "EventName": "DATA_TLB_MISSES.DTLB_MISS_ST",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB misses due to store operations."
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1",
+ "UMask": "0xa",
+ "EventName": "DATA_TLB_MISSES.L0_DTLB_MISS_ST",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L0 DTLB misses due to store operations"
+ },
+ {
+ "EventCode": "0xC",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.WALKS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of page-walks executed."
+ },
+ {
+ "EventCode": "0xC",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Duration of page-walks in core cycles"
+ },
+ {
+ "EventCode": "0xC",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_WALKS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of D-side only page walks"
+ },
+ {
+ "EventCode": "0xC",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Duration of D-side only page walks"
+ },
+ {
+ "EventCode": "0xC",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_WALKS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Number of I-Side page walks"
+ },
+ {
+ "EventCode": "0xC",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Duration of I-Side page walks"
+ },
+ {
+ "EventCode": "0x82",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB hits."
+ },
+ {
+ "EventCode": "0x82",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "ITLB.FLUSH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB flushes."
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0x82",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "ITLB.MISSES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB misses."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss the DTLB (precise event)."
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
new file mode 100644
index 000000000000..73688a9dab2a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -0,0 +1,3198 @@
+[
+ {
+ "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x50",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe1",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe2",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the total number of L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe4",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf8",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x50",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D miss oustandings duration in cycles",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1D is locked",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both cacheable and noncachaeble code read requests.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand and prefetch data reads",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
+ "EventCode": "0xb2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "Errata": "BDM35",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with locked access.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load uops.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store uops.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "BDM35",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDM100",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "Errata": "BDM100, BDE70",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM100",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "BDM100",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDM100",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "BDM100",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDE70, BDM100",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANS.RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANS.CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts L1D writebacks that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANS.L1D_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts L2 fill requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANS.L2_FILL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_IN.I",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of split locks in the super queue.",
+ "EventCode": "0xf4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Split locks in SQ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x27",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe7",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All L2 requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM76",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts writebacks (modified to exclusive) that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000018000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f80020122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00803c0122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01003c0122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02003c0122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
new file mode 100644
index 000000000000..102bfb808199
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
@@ -0,0 +1,171 @@
+[
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "BDM30",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "Errata": "BDM30",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to input values",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1e",
+ "EventName": "FP_ASSIST.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xc7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2a",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x15",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
new file mode 100644
index 000000000000..b0cdf1f097a0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
@@ -0,0 +1,286 @@
+[
+ {
+ "PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "IDQ.EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
new file mode 100644
index 000000000000..ff5416d29d0d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
@@ -0,0 +1,2845 @@
+[
+ {
+ "PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a TSX line had a cache conflict",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times we could not allocate Lock Buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "TX_EXEC.MISC2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "TX_EXEC.MISC3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RTM region detected inside HLE.",
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "TX_EXEC.MISC4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "HLE_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times HLE commit succeeded.",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times HLE commit succeeded",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of times HLE abort was triggered.",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times HLE abort was triggered",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "HLE_RETIRED.ABORTED_MISC1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "HLE_RETIRED.ABORTED_MISC2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "HLE_RETIRED.ABORTED_MISC3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times HLE caused a fault.",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "HLE_RETIRED.ABORTED_MISC4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "HLE_RETIRED.ABORTED_MISC5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RTM_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times RTM commit succeeded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of times RTM abort was triggered .",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times RTM abort was triggered",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of times a RTM caused a fault.",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This event counts loads with latency value being above four.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "BDM100, BDM35",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads with latency value being above 4",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This event counts loads with latency value being above eight.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "BDM100, BDM35",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Loads with latency value being above 8",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This event counts loads with latency value being above 16.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "BDM100, BDM35",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Loads with latency value being above 16",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This event counts loads with latency value being above 32.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "BDM100, BDM35",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Loads with latency value being above 32",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This event counts loads with latency value being above 64.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "BDM100, BDM35",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2003",
+ "BriefDescription": "Loads with latency value being above 64",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This event counts loads with latency value being above 128.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "BDM100, BDM35",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1009",
+ "BriefDescription": "Loads with latency value being above 128",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This event counts loads with latency value being above 256.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "BDM100, BDM35",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "503",
+ "BriefDescription": "Loads with latency value being above 256",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This event counts loads with latency value being above 512.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "BDM100, BDM35",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "101",
+ "BriefDescription": "Loads with latency value being above 512",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts writebacks (modified to exclusive) that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts writebacks (modified to exclusive) that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000040 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L2_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000200 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000090 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000120 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000240 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_PF_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000091 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20003c0122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the target was non-DRAM system address.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f84000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with no details on snoop-related information.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 with a snoop miss response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000122 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
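
Note on the OFFCORE_RESPONSE entries above: they all program the same event (0xB7/0xBB) and differ only in the value written to MSR 0x1a6/0x1a7. As the values in this table suggest, the MSRValue packs a request-type mask in the low 16 bits (e.g. ALL_PF_RFO = 0x120, ALL_DATA_RD = 0x091), supplier/response bits roughly in bits 16-30 (SUPPLIER_NONE = bit 17, L3_MISS_LOCAL_DRAM = bit 26), and snoop-response bits in bits 31-37 (SNOOP_HITM = bit 36, ANY_SNOOP = all of them). The following minimal Python sketch only decodes those fields; the masks are inferred from the values listed here and the helper name is made up for illustration, so treat it as a reading aid rather than a definitive encoding.

REQUEST_BITS  = 0x000000ffff   # request-type mask, e.g. ALL_PF_RFO = 0x120
SUPPLIER_BITS = 0x007fff0000   # supplier/response info, e.g. L3_MISS_LOCAL_DRAM = bit 26
SNOOP_BITS    = 0x3f80000000   # snoop info, e.g. SNOOP_HITM = bit 36

def decode_offcore_msr(msr_value: int) -> dict:
    # Split an OFFCORE_RESPONSE MSR programming value into its three fields.
    return {
        "request":  msr_value & REQUEST_BITS,
        "supplier": msr_value & SUPPLIER_BITS,
        "snoop":    msr_value & SNOOP_BITS,
    }

# OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM from the table above:
fields = decode_offcore_msr(0x1004000120)
print({k: hex(v) for k, v in fields.items()})
# {'request': '0x120', 'supplier': '0x4000000', 'snoop': '0x1000000000'}
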
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/other.json b/tools/perf/pmu-events/arch/x86/broadwell/other.json
new file mode 100644
index 000000000000..edf14f0d0eaf
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/other.json
@@ -0,0 +1,44 @@
+[
+ {
+ "PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPL_CYCLES.RING0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "CPL_CYCLES.RING0_TRANS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
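
For reference on how files like the one above are consumed: the perf pmu-events build turns each JSON object into a named event alias, and for the simple entries the EventCode/UMask pair maps onto the usual raw encoding (event select in bits 0-7, unit mask in bits 8-15). Below is a rough Python sketch under that assumption; it deliberately ignores attributes such as EdgeDetect, Invert and CounterMask, and raw_encoding is a made-up helper, not part of the perf tooling.

import json

def raw_encoding(event: dict) -> int:
    # Illustrative only: fold EventCode and UMask into a 'perf stat -e rXXXX'-style
    # raw value (event select in bits 0-7, unit mask in bits 8-15). Attributes like
    # EdgeDetect, Invert and CounterMask are deliberately ignored here.
    code = int(event["EventCode"].split(",")[0], 16)   # "0x5C" -> 0x5c
    umask = int(event["UMask"], 16)                    # "0x1"  -> 0x1
    return (umask << 8) | code

with open("tools/perf/pmu-events/arch/x86/broadwell/other.json") as f:
    for ev in json.load(f):
        print(f"{ev['EventName']}: r{raw_encoding(ev):04x}")
# e.g. CPL_CYCLES.RING0: r015c, LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION: r0163
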
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
new file mode 100644
index 000000000000..78913ae87703
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -0,0 +1,1417 @@
+[
+ {
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired from execution.",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "EventCode": "0x07",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when divider is busy executing divide operations",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3c",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
+ "EventCode": "0x4c",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "EventCode": "0x5E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts not taken macro-conditional branch instructions.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken macro-conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts taken speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x90",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired direct near calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc2",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc8",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xd0",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired direct near calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of uops executed from any thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of uops executed on the core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "BDM61",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "Errata": "BDM11, BDM55",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "CounterHTOff": "1"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Actually retired uops.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "Errata": "BDW98",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Far branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDW98",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xe6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "EventCode": "0xA0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
new file mode 100644
index 000000000000..4301e6fbc5eb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
@@ -0,0 +1,388 @@
+[
+ {
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM69",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "BDM69",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDM69",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "BDM69",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "Errata": "BDM69",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM69",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "BDM69",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDM69",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "BDM69",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "Errata": "BDM69",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycle count for an Extended Page table walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "BDM69",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "BDM69",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "BDM69",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "BDM69",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "Errata": "BDM69",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "Errata": "BDM69, BDM98",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "Errata": "BDM69, BDM98",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "Errata": "BDM69, BDM98",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x22",
+ "Errata": "BDM69, BDM98",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x14",
+ "Errata": "BDM69, BDM98",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "Errata": "BDM69, BDM98",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "Errata": "BDM69, BDM98",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "STLB flush attempts",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "Errata": "BDM69",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
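
For context (not part of the patch itself): each of these files is a plain JSON array of event records, and perf's jevents build step turns them into built-in event aliases, so the EventName strings above become directly usable event names. A minimal sketch for inspecting one of the files with standard-library Python, assuming the in-tree path added by this patch:

import json

# Path added by this patch; adjust if inspecting a different tree (assumption).
PATH = "tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json"

with open(PATH) as f:
    events = json.load(f)

# Print each alias with its raw event/umask encoding, useful for cross-checking
# the encodings against the BriefDescription fields above.
for ev in events:
    print(f'{ev["EventName"]:40} event={ev["EventCode"]} umask={ev["UMask"]}')
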
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
new file mode 100644
index 000000000000..36fe398029b9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -0,0 +1,774 @@
+[
+ {
+ "EventCode": "0x24",
+ "UMask": "0x21",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x30",
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x50",
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe1",
+ "BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe2",
+ "BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe4",
+ "BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "This event counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xf8",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "UMask": "0x50",
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "UMask": "0x41",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "UMask": "0x4f",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "L1D miss oustandings duration in cycles",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x51",
+ "UMask": "0x1",
+ "BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x63",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x1",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x2",
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "This event counts both cacheable and noncachaeble code read requests.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x4",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x8",
+ "BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb2",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x11",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x12",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x21",
+ "BriefDescription": "Retired load uops with locked access.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "Errata": "BDM35",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x41",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x42",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "SampleAfterValue": "100003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x81",
+ "BriefDescription": "All retired load uops.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x82",
+ "BriefDescription": "All retired store uops.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "SampleAfterValue": "2000003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x1",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x2",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "Errata": "BDM35",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x8",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x10",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x20",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "Errata": "BDM100, BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x40",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x1",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x2",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x8",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x1",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
+ "Errata": "BDE70, BDM100",
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x1",
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x2",
+ "BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x4",
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x8",
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x10",
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "This event counts L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x20",
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "This event counts L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x40",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x80",
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x1",
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x2",
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x4",
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x7",
+ "BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "UMask": "0x5",
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf4",
+ "UMask": "0x10",
+ "BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "This event counts the number of split locks in the super queue.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
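Each entry in these tables becomes a named event that perf can program directly; on Intel the raw encoding places EventCode in bits 0-7 and UMask in bits 8-15 of the perf_event_attr config word. As a hedged illustration only (not part of this patch), the C sketch below counts L2_RQSTS.MISS (0x24/0x3f) from the table above via perf_event_open(2); the syscall wrapper and the placement of the measured workload are assumptions, and OFFCORE_RESPONSE-style events would additionally need the dedicated MSR value, which the kernel typically exposes through config1.

/* Minimal sketch: count L2_RQSTS.MISS (EventCode 0x24, UMask 0x3f) with a
 * raw perf event.  Assumes perf_event_paranoid permits per-process
 * counting; error handling is abbreviated.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        /* config = UMask << 8 | EventCode, as listed in the entries above */
        attr.config = (0x3fULL << 8) | 0x24;    /* L2_RQSTS.MISS */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = perf_event_open(&attr, 0 /* this process */, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload under measurement would run here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("L2_RQSTS.MISS: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}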
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
new file mode 100644
index 000000000000..4ae1ea24f22f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
@@ -0,0 +1,171 @@
+[
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x8",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "Errata": "BDM30",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x10",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "Errata": "BDM30",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x1",
+ "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2",
+ "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x4",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x8",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x10",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x2",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x4",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x8",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x10",
+ "BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x1e",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.ANY",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "UMask": "0x20",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3c",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2a",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x15",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
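The FP_ARITH_INST_RETIRED.* descriptions above state how many floating-point computations each count represents (1 for scalar, 2 for 128-bit packed double, 4 for 128-bit packed single and 256-bit packed double, 8 for 256-bit packed single, with FM(N)ADD/SUB already counted twice by the events themselves). A minimal sketch of turning raw counts into a FLOP total, purely illustrative and not part of this patch; the function name and parameters are placeholders:

#include <stdint.h>

/* FLOP total from the FP_ARITH_INST_RETIRED.* counts listed above.
 * Multipliers follow each event's BriefDescription; FM(N)ADD/SUB already
 * increments the events twice, so no extra FMA factor is applied here. */
static double fp_ops(uint64_t scalar_single, uint64_t scalar_double,
                     uint64_t pk128_single, uint64_t pk128_double,
                     uint64_t pk256_single, uint64_t pk256_double)
{
        return 1.0 * (scalar_single + scalar_double) +
               4.0 * pk128_single + 2.0 * pk128_double +
               8.0 * pk256_single + 4.0 * pk256_double;
}

Dividing by elapsed seconds would give FLOPS; that scaling is left out here.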
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
new file mode 100644
index 000000000000..06bf0a40e568
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
@@ -0,0 +1,286 @@
+[
+ {
+ "EventCode": "0x79",
+ "UMask": "0x2",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.EMPTY",
+ "PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x4",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x18",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x18",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x24",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x24",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x3c",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x1",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x2",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "CounterMask": "4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "CounterMask": "3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xAB",
+ "UMask": "0x2",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
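The IDQ_UOPS_NOT_DELIVERED.CORE description above is the basis of the usual top-down "frontend bound" estimate: the allocator has four slots per cycle, so the fraction of slots left empty while the backend is not stalled approximates how often the front end starves the machine. A sketch of that ratio, assuming the two counts named in the comment have already been read; the helper is illustrative, not part of this patch:

#include <stdint.h>

/* Frontend-bound fraction from IDQ_UOPS_NOT_DELIVERED.CORE (above) and
 * CPU_CLK_UNHALTED.THREAD (defined in pipeline.json): four issue slots
 * per unhalted cycle. */
static double frontend_bound(uint64_t idq_uops_not_delivered_core,
                             uint64_t cpu_clk_unhalted_thread)
{
        return (double)idq_uops_not_delivered_core /
               (4.0 * (double)cpu_clk_unhalted_thread);
}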
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
new file mode 100644
index 000000000000..cfa1e5876ec3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
@@ -0,0 +1,433 @@
+[
+ {
+ "EventCode": "0x05",
+ "UMask": "0x1",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x05",
+ "UMask": "0x2",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times a TSX line had a cache conflict",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times we could not allocate Lock Buffer",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x1",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC1",
+ "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x4",
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x8",
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x10",
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times HLE commit succeeded",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times HLE abort was triggered",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PublicDescription": "Number of times HLE abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC2",
+ "PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC3",
+ "PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC4",
+ "PublicDescription": "Number of times HLE caused a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x80",
+ "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times RTM commit succeeded",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times RTM abort was triggered",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PublicDescription": "Number of times RTM abort was triggered .",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "PublicDescription": "Number of times a RTM caused a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x80",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 4",
+ "PEBS": "2",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above four.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 8",
+ "PEBS": "2",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above eight.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 16",
+ "PEBS": "2",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 16.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 32",
+ "PEBS": "2",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 32.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 64",
+ "PEBS": "2",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 64.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "2003",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 128",
+ "PEBS": "2",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 128.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "1009",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 256",
+ "PEBS": "2",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 256.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "503",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 512",
+ "PEBS": "2",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 512.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "101",
+ "CounterHTOff": "3"
+ }
+] \ No newline at end of file
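The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above all share EventCode 0xCD and UMask 0x1 and differ only in the latency threshold written to MSR 0x3F6 (the MSRValue field); the kernel's Intel PMU driver exposes that threshold as the ldlat format field carried in config1, which is what perf mem uses. The fragment below is a hedged sketch of setting up a sampling event with a 128-cycle threshold; the use of config1 for the threshold and the chosen sample_type flags are assumptions based on the usual Intel PMU layout, not something defined by this patch.

#include <linux/perf_event.h>
#include <string.h>

/* Sketch only: request PEBS load-latency sampling corresponding to
 * MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128 above.  EventCode 0xCD and
 * UMask 0x1 go in config; the latency threshold (the MSR 0x3F6 value)
 * is assumed to be taken from config1, matching the Intel "ldlat"
 * format field.  Real code would then open the event with
 * perf_event_open(2) and mmap a ring buffer for the samples. */
static void setup_load_latency(struct perf_event_attr *attr)
{
        memset(attr, 0, sizeof(*attr));
        attr->size = sizeof(*attr);
        attr->type = PERF_TYPE_RAW;
        attr->config = (0x1ULL << 8) | 0xcd;  /* UMask << 8 | EventCode */
        attr->config1 = 128;                  /* latency threshold, per MSRValue 0x80 */
        attr->sample_period = 1009;           /* SampleAfterValue above */
        attr->precise_ip = 2;                 /* PEBS, per "PEBS": "2" */
        attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR |
                            PERF_SAMPLE_WEIGHT;
}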
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/other.json b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
new file mode 100644
index 000000000000..718fcb1db2ee
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
@@ -0,0 +1,44 @@
+[
+ {
+ "EventCode": "0x5C",
+ "UMask": "0x1",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING0",
+ "PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x5C",
+ "UMask": "0x1",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING0_TRANS",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x63",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
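The CPL_CYCLES pair above splits unhalted cycles by privilege level, which makes a quick kernel-time indicator. A trivial sketch, illustrative only:

#include <stdint.h>

/* Fraction of unhalted cycles spent in ring 0, from the CPL_CYCLES
 * events above; a quick way to see how kernel-heavy a workload is. */
static double ring0_fraction(uint64_t cpl_ring0, uint64_t cpl_ring123)
{
        uint64_t total = cpl_ring0 + cpl_ring123;

        return total ? (double)cpl_ring0 / (double)total : 0.0;
}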
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
new file mode 100644
index 000000000000..02b4e1035f2d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -0,0 +1,1417 @@
+[
+ {
+ "EventCode": "0x00",
+ "UMask": "0x1",
+ "BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 1",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x3",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "EventCode": "0x03",
+ "UMask": "0x2",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "UMask": "0x8",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x07",
+ "UMask": "0x1",
+ "BriefDescription": "False dependencies in MOB due to partial compare",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x3",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x10",
+ "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x20",
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x40",
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x14",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when divider is busy executing divide operations",
+ "Counter": "0,1,2,3",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3c",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x4c",
+ "UMask": "0x1",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4C",
+ "UMask": "0x2",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x1",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x2",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x4",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x8",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "UMask": "0x1",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x41",
+ "BriefDescription": "Not taken macro-conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts not taken macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x81",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x82",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x84",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x88",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x90",
+ "BriefDescription": "Taken speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired indirect calls",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc1",
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc2",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc4",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc8",
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xd0",
+ "BriefDescription": "Speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xff",
+ "BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x41",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x81",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x84",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x88",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xc1",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xc4",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xff",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x1",
+ "BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.RS",
+ "PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "CounterMask": "1",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "CounterMask": "2",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "CounterMask": "4",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "CounterMask": "5",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "CounterMask": "6",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "CounterMask": "12",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "BDM61",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x1",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "PEBS": "2",
+ "Counter": "1",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "Errata": "BDM11, BDM55",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "1"
+ },
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Actually retired uops.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "CounterMask": "10",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x4",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x20",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x1",
+ "BriefDescription": "Conditional branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x8",
+ "BriefDescription": "Return instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x10",
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x20",
+ "BriefDescription": "Taken branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x40",
+ "BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x0",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x8",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x4",
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x20",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xe6",
+ "UMask": "0x1f",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "CounterMask": "8",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "CounterMask": "12",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA0",
+ "UMask": "0x3",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
new file mode 100644
index 000000000000..5ce8b67ba076
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
@@ -0,0 +1,388 @@
+[
+ {
+ "EventCode": "0x08",
+ "UMask": "0x1",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x2",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x4",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x8",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x20",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x40",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x1",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x2",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x4",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x8",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x20",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x40",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4F",
+ "UMask": "0x10",
+ "BriefDescription": "Cycle count for an Extended Page table walk.",
+ "Counter": "0,1,2,3",
+ "EventName": "EPT.WALK_CYCLES",
+ "PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x1",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x2",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x4",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x8",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x20",
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x40",
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAE",
+ "UMask": "0x1",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x11",
+ "BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L1",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBD",
+ "UMask": "0x1",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBD",
+ "UMask": "0x20",
+ "BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
new file mode 100644
index 000000000000..d1d043829b95
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -0,0 +1,942 @@
+[
+ {
+ "EventCode": "0x24",
+ "UMask": "0x21",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x30",
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x50",
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe1",
+ "BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe2",
+ "BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe4",
+ "BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "This event counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xf8",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "UMask": "0x50",
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "UMask": "0x41",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "UMask": "0x4f",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "L1D miss oustandings duration in cycles",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x51",
+ "UMask": "0x1",
+ "BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The \"Offcore outstanding\" state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x63",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x1",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x2",
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "This event counts both cacheable and noncachaeble code read requests.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x4",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x8",
+ "BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable \"Demands\" and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb2",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x11",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x12",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x21",
+ "BriefDescription": "Retired load uops with locked access.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "Errata": "BDM35",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x41",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x42",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "SampleAfterValue": "100003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x81",
+ "BriefDescription": "All retired load uops.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x82",
+ "BriefDescription": "All retired store uops.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
+ "SampleAfterValue": "2000003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x1",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x2",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "Errata": "BDM35",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x8",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x10",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x20",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "Errata": "BDM100, BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x40",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x1",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x2",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x8",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
+ "Errata": "BDM100",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x1",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
+ "Errata": "BDE70, BDM100",
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Errata": "BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x10",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
+ "Errata": "BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x20",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
+ "Errata": "BDE70",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x1",
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x2",
+ "BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x4",
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x8",
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x10",
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "This event counts L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x20",
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "This event counts L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x40",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x80",
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x1",
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x2",
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x4",
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x7",
+ "BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "UMask": "0x5",
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf4",
+ "UMask": "0x10",
+ "BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "This event counts the number of split locks in the super queue.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all requests that hit in the L3",
+ "MSRValue": "0x3f803c8fff",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c07f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c07f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c0244",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c0091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c0091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
+ "MSRValue": "0x3f803c0200",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
+ "MSRValue": "0x3f803c0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
new file mode 100644
index 000000000000..4ae1ea24f22f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
@@ -0,0 +1,171 @@
+[
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x8",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "Errata": "BDM30",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x10",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "Errata": "BDM30",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x1",
+ "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2",
+ "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x4",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x8",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x10",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x2",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x4",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x8",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x10",
+ "BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x1e",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.ANY",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc7",
+ "UMask": "0x20",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x3c",
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2a",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SINGLE",
+ "SampleAfterValue": "2000005",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x15",
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
new file mode 100644
index 000000000000..06bf0a40e568
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
@@ -0,0 +1,286 @@
+[
+ {
+ "EventCode": "0x79",
+ "UMask": "0x2",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.EMPTY",
+ "PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x4",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may \"bypass\" the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x18",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x18",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may \"bypass\" the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x24",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x24",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x3c",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may \"bypass\" the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x1",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x2",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "CounterMask": "4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "CounterMask": "3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xAB",
+ "UMask": "0x2",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
new file mode 100644
index 000000000000..1204ea8ff30d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
@@ -0,0 +1,649 @@
+[
+ {
+ "EventCode": "0x05",
+ "UMask": "0x1",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x05",
+ "UMask": "0x2",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times a TSX line had a cache conflict",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times we could not allocate Lock Buffer",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x1",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC1",
+ "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x4",
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x8",
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x10",
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times HLE commit succeeded",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times HLE abort was triggered",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PublicDescription": "Number of times HLE abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC2",
+ "PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC3",
+ "PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC4",
+ "PublicDescription": "Number of times HLE caused a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x80",
+ "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times RTM commit succeeded",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times RTM abort was triggered",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PublicDescription": "Number of times RTM abort was triggered .",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "PublicDescription": "Number of times a RTM caused a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x80",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 4",
+ "PEBS": "2",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above four.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 8",
+ "PEBS": "2",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above eight.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 16",
+ "PEBS": "2",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 16.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 32",
+ "PEBS": "2",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 32.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 64",
+ "PEBS": "2",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 64.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "2003",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 128",
+ "PEBS": "2",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 128.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "1009",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 256",
+ "PEBS": "2",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 256.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "503",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 512",
+ "PEBS": "2",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "Errata": "BDM100, BDM35",
+ "PublicDescription": "This event counts loads with latency value being above 512.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "101",
+ "CounterHTOff": "3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all requests that miss in the L3",
+ "MSRValue": "0x3fbfc08fff",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x087fc007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103fc007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063bc007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x06040007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
+ "MSRValue": "0x3fbfc007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x0604000244",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
+ "MSRValue": "0x3fbfc00244",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x0604000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
+ "MSRValue": "0x3fbfc00122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x087fc00091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103fc00091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063bc00091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x0604000091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
+ "MSRValue": "0x3fbfc00091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
+ "MSRValue": "0x3fbfc00200",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
+ "MSRValue": "0x3fbfc00100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103fc00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/other.json b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
new file mode 100644
index 000000000000..718fcb1db2ee
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
@@ -0,0 +1,44 @@
+[
+ {
+ "EventCode": "0x5C",
+ "UMask": "0x1",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING0",
+ "PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x5C",
+ "UMask": "0x1",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING0_TRANS",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x63",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
new file mode 100644
index 000000000000..02b4e1035f2d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -0,0 +1,1417 @@
+[
+ {
+ "EventCode": "0x00",
+ "UMask": "0x1",
+ "BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 1",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x3",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "EventCode": "0x03",
+ "UMask": "0x2",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "UMask": "0x8",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x07",
+ "UMask": "0x1",
+ "BriefDescription": "False dependencies in MOB due to partial compare",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x3",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x10",
+ "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x20",
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x40",
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x14",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when divider is busy executing divide operations",
+ "Counter": "0,1,2,3",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3c",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x4c",
+ "UMask": "0x1",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4C",
+ "UMask": "0x2",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x1",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x2",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x4",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x8",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "UMask": "0x1",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x41",
+ "BriefDescription": "Not taken macro-conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts not taken macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x81",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x82",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x84",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x88",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x90",
+ "BriefDescription": "Taken speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired indirect calls",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc1",
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc2",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc4",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc8",
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xd0",
+ "BriefDescription": "Speculative and retired direct near calls",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xff",
+ "BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x41",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x81",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x84",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x88",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xc1",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xc4",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xff",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x1",
+ "BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.RS",
+ "PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "CounterMask": "1",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "CounterMask": "2",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "CounterMask": "4",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "CounterMask": "5",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "CounterMask": "6",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "CounterMask": "12",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of Uops delivered by the LSD. ",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "BDM61",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x1",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "PEBS": "2",
+ "Counter": "1",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "Errata": "BDM11, BDM55",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "1"
+ },
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Actually retired uops.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "CounterMask": "10",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x4",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x20",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x1",
+ "BriefDescription": "Conditional branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x8",
+ "BriefDescription": "Return instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x10",
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x20",
+ "BriefDescription": "Taken branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x40",
+ "BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "Errata": "BDW98",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x0",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x8",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x4",
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x20",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xe6",
+ "UMask": "0x1f",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "CounterMask": "8",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "CounterMask": "12",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA0",
+ "UMask": "0x3",
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "Counter": "0,1,2,3",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
new file mode 100644
index 000000000000..5ce8b67ba076
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
@@ -0,0 +1,388 @@
+[
+ {
+ "EventCode": "0x08",
+ "UMask": "0x1",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x2",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x4",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x8",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x20",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x40",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x1",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x2",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x4",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x8",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x20",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x40",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4F",
+ "UMask": "0x10",
+ "BriefDescription": "Cycle count for an Extended Page table walk.",
+ "Counter": "0,1,2,3",
+ "EventName": "EPT.WALK_CYCLES",
+ "PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x1",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x2",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x4",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x8",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "Errata": "BDM69",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x20",
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x40",
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAE",
+ "UMask": "0x1",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x11",
+ "BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L1",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "Errata": "BDM69, BDM98",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBD",
+ "UMask": "0x1",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBD",
+ "UMask": "0x20",
+ "BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "Errata": "BDM69",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
new file mode 100644
index 000000000000..4e02e1e5e70d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
@@ -0,0 +1,1127 @@
+[
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
+ "EventCode": "0x30",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "L2_REJECT_XQ.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests rejected by the XQ"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
+ "EventCode": "0x31",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CORE_REJECT_L2Q.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests rejected by the L2Q "
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache requests"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache request misses"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts cycles that an ICache miss is outstanding, and instruction fetch is stalled. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes, while an Icache miss outstanding. Note this event is not the same as cycles to retrieve an instruction due to an Icache miss. Rather, it is the part of the Instruction Cache (ICache) miss time where no bytes are available for the decoder.",
+ "EventCode": "0x86",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles where code-fetch is stalled and an ICache miss is outstanding. This is not the same as an ICache Miss."
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of load uops retired.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of store uops retired.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Store uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x83",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Memory uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts locked memory uops retired. This includes \"regular\" locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Locked load uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x43",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that hit the L1 data cache.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that miss in the L2 cache.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that missed L2 (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core or modules cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than most is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider load X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses cache first, it obtains and WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will either hit the WCB, or the L1 cache, depending on exactly what time the request to Y occurs.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads retired that hit WCB (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. Event is counted at retirement, so the speculative loads are ignored. A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB or receive a HITM response.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads retired that came from DRAM (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DL1.DIRTY_EVICTION",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L1 Cache evictions for dirty data"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "EventCode": "0xB7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x36000032b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x10000032b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x04000032b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x02000032b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x00000432b7 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040022 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200003091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000043091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads (demand & prefetch) that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200003010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000043010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000048000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000018000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600004800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000044800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200004000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000044000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200002000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000042000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200001000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000041000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010400 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040020 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040010 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040008 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3600000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000040001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/frontend.json b/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
new file mode 100644
index 000000000000..9ba08518649e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
@@ -0,0 +1,52 @@
+[
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
+ "EventCode": "0xE7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MS_DECODED.MS_ENTRY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "MS decode starts"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
+ "EventCode": "0xE9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Decode restrictions due to predicting wrong instruction length"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
new file mode 100644
index 000000000000..ac8b0d365a19
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
@@ -0,0 +1,34 @@
+[
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Machine clears due to memory ordering issue"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops that split a page (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts when a memory store of a uop spans a page boundary (a split) is retired.",
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Store uops that split a page (Precise event capable)"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/other.json b/tools/perf/pmu-events/arch/x86/goldmont/other.json
new file mode 100644
index 000000000000..df25ca9542f1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmont/other.json
@@ -0,0 +1,52 @@
+[
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle to recover"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Unfilled issue slots per cycle"
+ },
+ {
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts hardware interrupts received by the processor.",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Hardware interrupts received (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles pending interrupts are masked (Precise event capable)"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
new file mode 100644
index 000000000000..07f00041f56f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -0,0 +1,433 @@
+[
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7e",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfe",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf9",
+ "EventName": "BR_INST_RETIRED.CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near call instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near relative CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfd",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near relative call instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfb",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near return branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf7",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired near return instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xeb",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0xbf",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired far branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7e",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfe",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0xfb",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf7",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0xeb",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts uops which retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "UOPS_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops retired (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.MS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "MS uops retired (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel? architecture processors.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Self-Modifying Code detected"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Machine clears due to FP assists"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Machine clears due to memory disambiguation"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts machine clears for any reason.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "MACHINE_CLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All machine clears"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "UOPS_NOT_DELIVERED.ANY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Uops issued to the back end per cycle"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles if either divide unit is busy.",
+ "EventCode": "0xCD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a divider is busy"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the integer divide unit is busy.",
+ "EventCode": "0xCD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLES_DIV_BUSY.IDIV",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles the integer divide unit is busy"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
+ "EventCode": "0xCD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLES_DIV_BUSY.FPDIV",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles the FP divide unit is busy"
+ },
+ {
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired (Fixed event)"
+ },
+ {
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted (Fixed event)"
+ },
+ {
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "BACLEARs asserted for any branch type"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts BACLEARS on return instructions.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BACLEARS.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "BACLEARs asserted for return branch"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BACLEARS.COND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "BACLEARs asserted for conditional branch"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.UTLB_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Retired taken branch instructions (Precise event capable)"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
new file mode 100644
index 000000000000..3202c4478836
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
@@ -0,0 +1,75 @@
+[
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
+ "EventCode": "0x81",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "ITLB misses"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts load uops retired that caused a DTLB miss.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts store uops retired that caused a DTLB miss.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)"
+ },
+ {
+ "PEBS": "2",
+ "CollectPEBSRecord": "2",
+ "PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x13",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of D-side page-walks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of I-side pagewalks in cycles"
+ },
+ {
+ "CollectPEBSRecord": "1",
+ "PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of page-walks in cycles"
+ }
+] \ No newline at end of file
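A minimal usage sketch, assuming perf is built with the pmu-events alias generation this series adds and is run on a Goldmont system: the events defined in the JSON files above become selectable by name, so DTLB and ITLB miss events can be counted directly. The 10-second system-wide run below is only an illustrative invocation.

    # count DTLB-missing load/store uops and ITLB misses, system-wide, for 10 seconds
    perf stat -e mem_uops_retired.dtlb_miss_loads,mem_uops_retired.dtlb_miss_stores,itlb.miss -a sleep 10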
diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json
new file mode 100644
index 000000000000..bfb5ebf48c54
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json
@@ -0,0 +1,1041 @@
+[
+ {
+ "PublicDescription": "Demand data read requests that missed L2, no rejects.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x50",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe1",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe2",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe4",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 HW prefetcher requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf8",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not rejected writebacks that hit L2 cache.",
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x50",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D miss oustandings duration in cycles",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding demand data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding Demand code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles in which the L1D is locked.",
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1D is locked",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand data read requests sent to uncore.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand code read requests sent to uncore.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand and prefetch data reads",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "Errata": "HSD29, HSM30",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "Errata": "HSD29, HSM30",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "Errata": "HSD76, HSD29, HSM30",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops with locked access.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "Errata": "HSD29, HSM30",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "Errata": "HSD29, HSM30",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "Errata": "HSD29, HSM30",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load uops.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "Errata": "HSD29, HSM30",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store uops.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD29, HSM30",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD76, HSD29, HSM30",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops missed L1 cache as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "HSM30",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "Errata": "HSD29, HSM30",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "Errata": "HSM30",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD74, HSD29, HSD25, HSM30",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PublicDescription": "Demand data read requests that access L2 cache.",
+ "EventCode": "0xf0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFO requests that access L2 cache.",
+ "EventCode": "0xf0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANS.RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache accesses when fetching instructions.",
+ "EventCode": "0xf0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANS.CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Any MLC or L3 HW prefetch accessing L2, including rejects.",
+ "EventCode": "0xf0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L1D writebacks that access L2 cache.",
+ "EventCode": "0xf0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANS.L1D_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 fill requests that access L2 cache.",
+ "EventCode": "0xf0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANS.L2_FILL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 writebacks that access L2 cache.",
+ "EventCode": "0xf0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Transactions accessing L2 pipe.",
+ "EventCode": "0xf0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines in I state filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_IN.I",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines in S state filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines in E state filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Clean L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by demand",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Dirty L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Split locks in SQ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x27",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand requests to L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe7",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "All requests that missed L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "All requests to L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "Errata": "HSD78",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All L2 requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c8fff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all requests that hit in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c07f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c07f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0100",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
new file mode 100644
index 000000000000..1732fa49c6d2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
@@ -0,0 +1,83 @@
+[
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "HSD56, HSM57",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "Errata": "HSD56, HSM57",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of X87 FP assists due to output values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of X87 FP assists due to input values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SIMD FP assists due to output values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "SampleAfterValue": "100003",
+        "BriefDescription": "Number of SIMD FP assists due to output values",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SIMD FP assists due to input values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to input values",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1e",
+ "EventName": "FP_ASSIST.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
+ "EventCode": "0xC6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "AVX_INSTS.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/frontend.json b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
new file mode 100644
index 000000000000..57a1ce46971f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
@@ -0,0 +1,294 @@
+[
+ {
+ "PublicDescription": "Counts cycles the IDQ is empty.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD135",
+ "EventName": "IDQ.EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Increment each cycle # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of deliveries.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Counts cycles DSB is delivering four uops. Set Cmask = 4.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Counts cycles DSB is delivering at least one uop. Set Cmask = 1.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Counts cycles MITE is delivering four uops. Set Cmask = 4.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Counts cycles MITE is delivering at least one uop. Set Cmask = 1.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of uops delivered to IDQ from any path.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads, both cacheable and noncacheable, including UC fetches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts Instruction Cache (ICACHE) misses.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE.IFETCH_STALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "This event counts the number of undelivered (unallocated) uops from the Front-end to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. The Front-end can allocate up to 4 uops per cycle, so this event can increment 0-4 times per cycle depending on the number of unallocated uops. This event is counted on a per-core basis.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD135",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+        "PublicDescription": "This event counts the number of cycles during which the Front-end allocated exactly zero uops to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. This event is counted on a per-core basis.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD135",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD135",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD135",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD135",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD135",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json
new file mode 100644
index 000000000000..aab981b42339
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json
@@ -0,0 +1,655 @@
+[
+ {
+ "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative cache-line split store-address uops dispatched to L1D.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional writes.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "TX_EXEC.MISC2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "TX_EXEC.MISC3",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Counts the number of times an instruction execution caused the supported transactional nest count to be exceeded.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "TX_EXEC.MISC4",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Counts the number of times an XBEGIN instruction was executed inside an HLE transactional region.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of memory ordering machine clears detected. Memory ordering machine clears can result from memory address aliasing or snoops from another hardware thread or core to data inflight in the pipeline. Machine clears can have a significant performance impact if they are happening frequently.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "HLE_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution started.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution successfully committed.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of times an HLE execution aborted due to any reason (multiple categories may count as one).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "HLE_RETIRED.ABORTED_MISC1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "HLE_RETIRED.ABORTED_MISC2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "HLE_RETIRED.ABORTED_MISC3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "Errata": "HSD65",
+ "EventName": "HLE_RETIRED.ABORTED_MISC4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts).",
+ "EventCode": "0xc8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "HLE_RETIRED.ABORTED_MISC5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RTM_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution started.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution successfully committed.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of times an RTM execution aborted due to any reason (multiple categories may count as one).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "Errata": "HSD65",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "EventCode": "0xc9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads with latency value being above 4.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Loads with latency value being above 8.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Loads with latency value being above 16.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads with latency value being above 32.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2003",
+ "BriefDescription": "Loads with latency value being above 64.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1009",
+ "BriefDescription": "Loads with latency value being above 128.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "503",
+ "BriefDescription": "Loads with latency value being above 256.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "101",
+ "BriefDescription": "Loads with latency value being above 512.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc08fff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all requests that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01004007f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc007f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00100",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss in the L3",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/other.json b/tools/perf/pmu-events/arch/x86/haswell/other.json
new file mode 100644
index 000000000000..85d6a14baf9d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/other.json
@@ -0,0 +1,43 @@
+[
+ {
+ "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPL_CYCLES.RING0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "CPL_CYCLES.RING0_TRANS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
new file mode 100644
index 000000000000..0099848607ad
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
@@ -0,0 +1,1329 @@
+[
+ {
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "Errata": "HSD140, HSD143",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired from execution.",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+        "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline which can have a performance impact.",
+ "EventCode": "0x07",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "This event counts the number of cycles spent waiting for a recovery after an event such as a processor nuke, JEClear, assist, HLE/RTM abort, etc.",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops issued by the Front-end of the pipeline to the Back-end. This event is counted at the allocation stage and will count both retired and non-retired uops.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of flags-merge uops being allocated. Such uops are considered perf sensitive; added by GSR u-arch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Number of slow LEA or similar uops allocated. Such a uop has 3 sources (for example, 2 sources + immediate) regardless of whether it is a result of an LEA instruction or not.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless of whether it is a result of an LEA instruction or not.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ARITH.DIVIDER_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3c",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
+ "EventCode": "0x4c",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
+ "EventCode": "0x4c",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of integer move elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SIMD move elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of integer move elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SIMD move elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "This event counts cycles when the Reservation Station (RS) is empty for the thread. The RS is a structure that buffers allocated micro-ops from the Front-end. If there are many cycles when the RS is empty, it may represent an underflow of instructions delivered from the Front-end.",
+ "EventCode": "0x5E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Stall cycles because the IQ is full.",
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stall cycles because IQ is full",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken macro-conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x90",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc2",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc8",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xd0",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Cycles in which a uop is dispatched on port 0 in this thread.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles allocation is stalled due to resource related reason.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD135",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which no instructions were allocated because no Store Buffers (SB) were available.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with pending L2 miss loads. Set Cmask=2 to count cycle.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD78",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Cycles with pending memory loads. Set Cmask=2 to count cycle.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending memory loads.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of loads missed L2.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls due to L2 cache misses.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline and there were memory instructions pending (waiting for data).",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls due to memory subsystem.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls due to L1 data cache misses",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Number of uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of uops executed on the core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of instructions at retirement.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "HSD11, HSD140",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "Errata": "HSD140",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "CounterHTOff": "1"
+ },
+ {
+ "PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Actually retired uops.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of near return instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Return instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of near taken branches retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of far branches retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Far branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This event counts all mispredicted branch instructions retired. This is a precise event.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
+ "EventCode": "0xe6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
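For context on how the pipeline events above are consumed: on Intel x86, the EventCode and UMask fields map directly onto the PERFEVTSEL register layout, so a raw perf event config is (UMask << 8) | EventCode; once these JSON files are built into perf, the events can also be requested by EventName. A minimal user-space sketch (illustration only, not part of the patch; it assumes a Haswell CPU and picks BR_INST_RETIRED.CONDITIONAL, EventCode 0xC4 / UMask 0x01, from the table above):

    /* Illustrative sketch: count Haswell BR_INST_RETIRED.CONDITIONAL
     * (EventCode 0xC4, UMask 0x01) for the current process using a raw
     * perf event, where config = (UMask << 8) | EventCode.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count = 0;
            volatile int i, sum = 0;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type = PERF_TYPE_RAW;
            attr.size = sizeof(attr);
            attr.config = (0x01ULL << 8) | 0xC4;    /* UMask 0x01, EventCode 0xC4 */
            attr.disabled = 1;
            attr.exclude_kernel = 1;
            attr.exclude_hv = 1;

            fd = perf_event_open(&attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            for (i = 0; i < 100000; i++)            /* workload with conditional branches */
                    if (i & 1)
                            sum += i;
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            (void)sum;

            if (read(fd, &count, sizeof(count)) != sizeof(count))
                    count = 0;
            printf("conditional branches retired: %llu\n",
                   (unsigned long long)count);
            close(fd);
            return 0;
    }

The same encoding works from the command line as a raw event, e.g. perf stat -e r1c4 -- <cmd>, with <cmd> standing in for whatever workload is being measured.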
diff --git a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
new file mode 100644
index 000000000000..ce80a08d0f08
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
@@ -0,0 +1,484 @@
+[
+ {
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks due to demand load misses that caused 4K page walks in any TLB levels.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks due to demand load misses that caused 2M/4M page walks in any TLB levels.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 4K page structure.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 2M/4M page structure.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks. (1G)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4f",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycle count for an Extended Page table walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in ITLB that causes a page walk of any page size.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks due to misses in ITLB 4K page entries.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks due to misses in ITLB 2M/4M page entries.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "ITLB misses that hit STLB (4K).",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "ITLB misses that hit STLB (2M).",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
+ "EventCode": "0xae",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L1+FB.",
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of DTLB page walker hits in the L1+FB",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of DTLB page walker hits in the L2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L2.",
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x22",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of ITLB page walker hits in the L2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x14",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L3.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of DTLB page walker loads from memory.",
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of DTLB page walker hits in Memory",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x28",
+ "Errata": "HSD25",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x48",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_MEMORY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in memory.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Count number of STLB flush attempts.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STLB flush attempts",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks in ITLB of any page size.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x60",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
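A usage note on the virtual-memory events above (a common analysis pattern, not something this file defines): the *_MISSES.WALK_DURATION events count cycles the page miss handler (PMH) spends walking page tables, so dividing their sum by unhalted thread cycles gives a rough estimate of the time lost to hardware page walks:

    walk_overhead = (DTLB_LOAD_MISSES.WALK_DURATION
                     + DTLB_STORE_MISSES.WALK_DURATION
                     + ITLB_MISSES.WALK_DURATION)
                    / CPU_CLK_UNHALTED.THREAD_P

A noticeable ratio here is one common sign of TLB pressure that larger pages may relieve.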
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
new file mode 100644
index 000000000000..f1bae0817a6f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
@@ -0,0 +1,1077 @@
+[
+ {
+ "EventCode": "0x24",
+ "UMask": "0x21",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand data read requests that missed L2, no rejects.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x41",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand data read requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x30",
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x50",
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe1",
+ "BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "Errata": "HSD78",
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe2",
+ "BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe4",
+ "BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts all L2 code requests.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xf8",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts all L2 HW prefetcher requests.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "UMask": "0x50",
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "PublicDescription": "Not rejected writebacks that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "UMask": "0x41",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "UMask": "0x4f",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "L1D miss oustandings duration in cycles",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x51",
+ "UMask": "0x1",
+ "BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "Errata": "HSD78, HSD62, HSD61",
+ "PublicDescription": "Offcore outstanding demand data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "Errata": "HSD62, HSD61",
+ "PublicDescription": "Offcore outstanding Demand code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "Errata": "HSD62, HSD61",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "Errata": "HSD62, HSD61",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "HSD78, HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
+ "Errata": "HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x63",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles when L1D is locked",
+ "Counter": "0,1,2,3",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "Cycles in which the L1D is locked.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x1",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand data read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x2",
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Demand code read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x4",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x8",
+ "BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb2",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x11",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "Errata": "HSD29, HSM30",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x12",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "Errata": "HSD29, HSM30",
+ "SampleAfterValue": "100003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x21",
+ "BriefDescription": "Retired load uops with locked access.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "Errata": "HSD76, HSD29, HSM30",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x41",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "Errata": "HSD29, HSM30",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x42",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "Errata": "HSD29, HSM30",
+ "SampleAfterValue": "100003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x81",
+ "BriefDescription": "All retired load uops.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "Errata": "HSD29, HSM30",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x82",
+ "BriefDescription": "All retired store uops.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "Errata": "HSD29, HSM30",
+ "SampleAfterValue": "2000003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x1",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "Errata": "HSD29, HSM30",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x2",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "Errata": "HSD76, HSD29, HSM30",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x8",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "Errata": "HSM30",
+ "PublicDescription": "Retired load uops missed L1 cache as data sources.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x10",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "Errata": "HSD29, HSM30",
+ "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x20",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x40",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "Errata": "HSM30",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x1",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x2",
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x8",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x1",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
+ "Errata": "HSD74, HSD29, HSD25, HSM30",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Errata": "HSD29, HSM30",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x10",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
+ "Errata": "HSM30",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x20",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
+ "Errata": "HSM30",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xf0",
+ "UMask": "0x1",
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "Demand data read requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf0",
+ "UMask": "0x2",
+ "BriefDescription": "RFO requests that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "RFO requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf0",
+ "UMask": "0x4",
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "L2 cache accesses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf0",
+ "UMask": "0x8",
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "Any MLC or L3 HW prefetch accessing L2, including rejects.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf0",
+ "UMask": "0x10",
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf0",
+ "UMask": "0x20",
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf0",
+ "UMask": "0x40",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf0",
+ "UMask": "0x80",
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "Transactions accessing L2 pipe.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x1",
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "L2 cache lines in I state filling L2.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x2",
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "L2 cache lines in S state filling L2.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x4",
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "L2 cache lines in E state filling L2.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x7",
+ "BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "UMask": "0x5",
+ "BriefDescription": "Clean L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "PublicDescription": "Clean L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "UMask": "0x6",
+ "BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "PublicDescription": "Dirty L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xf4",
+ "UMask": "0x10",
+ "BriefDescription": "Split locks in SQ",
+ "Counter": "0,1,2,3",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x42",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x44",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "Errata": "HSD78",
+ "PublicDescription": "Demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "Errata": "HSD78",
+ "PublicDescription": "All requests that missed L2.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "Errata": "HSD78",
+ "PublicDescription": "All requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "Errata": "HSD78, HSD62, HSD61",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "2",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3",
+ "MSRValue": "0x3f803c0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3",
+ "MSRValue": "0x3f803c0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the L3",
+ "MSRValue": "0x3f803c0040",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3",
+ "MSRValue": "0x3f803c0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3",
+ "MSRValue": "0x3f803c0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the L3",
+ "MSRValue": "0x3f803c0200",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c0091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c0091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c0244",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x04003c07f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003c07f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all requests that hit in the L3",
+ "MSRValue": "0x3f803c8fff",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
new file mode 100644
index 000000000000..6282aed6e090
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
@@ -0,0 +1,83 @@
+[
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x8",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "Errata": "HSD56, HSM57",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x10",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "Errata": "HSD56, HSM57",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x2",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "Number of X87 FP assists due to output values.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x4",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "PublicDescription": "Number of X87 FP assists due to input values.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x8",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "PublicDescription": "Number of SIMD FP assists due to output values.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x10",
+ "BriefDescription": "Number of SIMD FP assists due to input values",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "Number of SIMD FP assists due to input values.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x1e",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.ANY",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x7",
+ "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
+ "Counter": "0,1,2,3",
+ "EventName": "AVX_INSTS.ALL",
+ "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
new file mode 100644
index 000000000000..2d0c7aac1e61
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
@@ -0,0 +1,294 @@
+[
+ {
+ "EventCode": "0x79",
+ "UMask": "0x2",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.EMPTY",
+ "Errata": "HSD135",
+ "PublicDescription": "Counts cycles the IDQ is empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x4",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x18",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x18",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x24",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x24",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles MITE is delivered at least one uop. Set Cmask = 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x3c",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "Number of uops delivered to IDQ from any path.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x1",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x2",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts Instruction Cache (ICACHE) misses.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.IFETCH_STALL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "Errata": "HSD135",
+ "PublicDescription": "This event count the number of undelivered (unallocated) uops from the Front-end to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. The Front-end can allocate up to 4 uops per cycle so this event can increment 0-4 times per cycle depending on the number of unallocated uops. This event is counted on a per-core basis.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "CounterMask": "4",
+ "Errata": "HSD135",
+ "PublicDescription": "This event counts the number cycles during which the Front-end allocated exactly zero uops to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. This event is counted on a per-core basis.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "CounterMask": "3",
+ "Errata": "HSD135",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "CounterMask": "2",
+ "Errata": "HSD135",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "CounterMask": "1",
+ "Errata": "HSD135",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "CounterMask": "1",
+ "Errata": "HSD135",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xAB",
+ "UMask": "0x2",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
new file mode 100644
index 000000000000..0886cc000d22
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
@@ -0,0 +1,739 @@
+[
+ {
+ "EventCode": "0x05",
+ "UMask": "0x1",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x05",
+ "UMask": "0x2",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "Speculative cache-line split store-address uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional writes.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x1",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x4",
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x8",
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x10",
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering machine clears detected. Memory ordering machine clears can result from memory address aliasing or snoops from another hardware thread or core to data inflight in the pipeline. Machine clears can have a significant performance impact if they are happening frequently.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times an HLE execution started.",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times an HLE execution successfully committed.",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions.",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC4",
+ "Errata": "HSD65",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc8",
+ "UMask": "0x80",
+ "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times an RTM execution successfully committed.",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "Errata": "HSD65",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xc9",
+ "UMask": "0x80",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 4.",
+ "PEBS": "2",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "Errata": "HSD76, HSD25, HSM26",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 8.",
+ "PEBS": "2",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "Errata": "HSD76, HSD25, HSM26",
+ "TakenAlone": "1",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 16.",
+ "PEBS": "2",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "Errata": "HSD76, HSD25, HSM26",
+ "TakenAlone": "1",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 32.",
+ "PEBS": "2",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "Errata": "HSD76, HSD25, HSM26",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 64.",
+ "PEBS": "2",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "Errata": "HSD76, HSD25, HSM26",
+ "TakenAlone": "1",
+ "SampleAfterValue": "2003",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 128.",
+ "PEBS": "2",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "Errata": "HSD76, HSD25, HSM26",
+ "TakenAlone": "1",
+ "SampleAfterValue": "1009",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 256.",
+ "PEBS": "2",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "Errata": "HSD76, HSD25, HSM26",
+ "TakenAlone": "1",
+ "SampleAfterValue": "503",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Loads with latency value being above 512.",
+ "PEBS": "2",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "Errata": "HSD76, HSD25, HSM26",
+ "TakenAlone": "1",
+ "SampleAfterValue": "101",
+ "CounterHTOff": "3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads that miss in the L3",
+ "MSRValue": "0x3fbfc00001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x0600400001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3",
+ "MSRValue": "0x3fbfc00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x0600400002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103fc00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads that miss in the L3",
+ "MSRValue": "0x3fbfc00004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x0600400004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3",
+ "MSRValue": "0x3fbfc00010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3",
+ "MSRValue": "0x3fbfc00020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the L3",
+ "MSRValue": "0x3fbfc00040",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3",
+ "MSRValue": "0x3fbfc00080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3",
+ "MSRValue": "0x3fbfc00100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that miss in the L3",
+ "MSRValue": "0x3fbfc00200",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3",
+ "MSRValue": "0x3fbfc00091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x0600400091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063f800091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103fc00091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x083fc00091",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3",
+ "MSRValue": "0x3fbfc00122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x0600400122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss in the L3",
+ "MSRValue": "0x3fbfc00244",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x0600400244",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss in the L3",
+ "MSRValue": "0x3fbfc007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x06004007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x063f8007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x103fc007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x083fc007f7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all requests that miss in the L3",
+ "MSRValue": "0x3fbfc08fff",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/other.json b/tools/perf/pmu-events/arch/x86/haswellx/other.json
new file mode 100644
index 000000000000..4e1b6ce96ca3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/other.json
@@ -0,0 +1,43 @@
+[
+ {
+ "EventCode": "0x5C",
+ "UMask": "0x1",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING0",
+ "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "UMask": "0x2",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x5C",
+ "UMask": "0x1",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPL_CYCLES.RING0_TRANS",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x63",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "Counter": "0,1,2,3",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
new file mode 100644
index 000000000000..c3a163d34bd7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
@@ -0,0 +1,1329 @@
+[
+ {
+ "EventCode": "0x00",
+ "UMask": "0x1",
+ "BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 1",
+ "EventName": "INST_RETIRED.ANY",
+ "Errata": "HSD140, HSD143",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x3",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "EventCode": "0x03",
+ "UMask": "0x2",
+ "BriefDescription": "loads blocked by overlapping with store buffer that cannot be forwarded",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "UMask": "0x8",
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x07",
+ "UMask": "0x1",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline which can have a performance impact.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x3",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...)",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts the number of cycles spent waiting for a recovery after an event such as a processor nuke, JEClear, assist, hle/rtm abort etc.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of uops issued by the Front-end of the pipeline to the Back-end. This event is counted at the allocation stage and will count both retired and non-retired uops.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x10",
+ "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x20",
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (for example, 2 sources + immediate) regardless of whether it is a result of LEA instruction or not.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x40",
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x14",
+ "UMask": "0x2",
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "Counter": "0,1,2,3",
+ "EventName": "ARITH.DIVIDER_UOPS",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3c",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x4c",
+ "UMask": "0x1",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "Counter": "0,1,2,3",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4c",
+ "UMask": "0x2",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "Counter": "0,1,2,3",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x1",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "PublicDescription": "Number of integer move elimination candidate uops that were eliminated.",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x2",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "PublicDescription": "Number of SIMD move elimination candidate uops that were eliminated.",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x4",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "PublicDescription": "Number of integer move elimination candidate uops that were not eliminated.",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "UMask": "0x8",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "Counter": "0,1,2,3",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "PublicDescription": "Number of SIMD move elimination candidate uops that were not eliminated.",
+ "SampleAfterValue": "1000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "This event counts cycles when the Reservation Station ( RS ) is empty for the thread. The RS is a structure that buffers allocated micro-ops from the Front-end. If there are many cycles when the RS is empty, it may represent an underflow of instructions delivered from the Front-end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "UMask": "0x1",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "UMask": "0x4",
+ "BriefDescription": "Stall cycles because IQ is full",
+ "Counter": "0,1,2,3",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "PublicDescription": "Stall cycles due to IQ is full.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x41",
+ "BriefDescription": "Not taken macro-conditional branches.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x81",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x82",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x84",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x88",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0x90",
+ "BriefDescription": "Taken speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc1",
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc2",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc4",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xc8",
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xd0",
+ "BriefDescription": "Speculative and retired direct near calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "UMask": "0xff",
+ "BriefDescription": "Speculative and retired branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x41",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x81",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x84",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0x88",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xc1",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xc4",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xff",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "Cycles which a uop is dispatched on port 0 in this thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x1",
+ "BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "Errata": "HSD135",
+ "PublicDescription": "Cycles allocation is stalled due to resource related reason.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.RS",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "This event counts cycles during which no instructions were allocated because no Store Buffers (SB) were available.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "CounterMask": "1",
+ "Errata": "HSD78",
+ "PublicDescription": "Cycles with pending L2 miss loads. Set Cmask=2 to count cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "CounterMask": "8",
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with pending memory loads.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "CounterMask": "2",
+ "PublicDescription": "Cycles with pending memory loads. Set Cmask=2 to count cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "CounterMask": "4",
+ "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls due to L2 cache misses.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "CounterMask": "5",
+ "PublicDescription": "Number of loads missed L2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls due to memory subsystem.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "CounterMask": "6",
+ "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline and there were memory instructions pending (waiting for data).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls due to L1 data cache misses",
+ "Counter": "2",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "CounterMask": "12",
+ "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xa8",
+ "UMask": "0x1",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of uops delivered by the LSD.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "Errata": "HSD30, HSM31",
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "HSD11, HSD140",
+ "PublicDescription": "Number of instructions at retirement.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x2",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x1",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "PEBS": "2",
+ "Counter": "1",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "Errata": "HSD140",
+ "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "1"
+ },
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Actually retired uops.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "CounterMask": "10",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC2",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x4",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x20",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x1",
+ "BriefDescription": "Conditional branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PublicDescription": "Counts the number of conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Branch instructions at retirement.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x8",
+ "BriefDescription": "Return instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PublicDescription": "Counts the number of near return instructions retired.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x10",
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x20",
+ "BriefDescription": "Taken branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near taken branches retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x40",
+ "BriefDescription": "Far branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PublicDescription": "Number of far branches retired.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x0",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x4",
+ "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PublicDescription": "This event counts all mispredicted branch instructions retired. This is a precise event.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Count cases of saving new LBR",
+ "Counter": "0,1,2,3",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "UMask": "0xa0",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x20",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "Errata": "HSD144, HSD30, HSM31",
+ "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "Errata": "HSD144, HSD30, HSM31",
+ "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "Errata": "HSD144, HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xe6",
+ "UMask": "0x1f",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x3",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "CounterMask": "1",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xb1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Errata": "HSD30, HSM31",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
new file mode 100644
index 000000000000..9c00f8ef6a07
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
@@ -0,0 +1,484 @@
+[
+ {
+ "EventCode": "0x08",
+ "UMask": "0x1",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x2",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Completed page walks due to demand load misses that caused 4K page walks in any TLB levels.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x4",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Completed page walks due to demand load misses that caused 2M/4M page walks in any TLB levels.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x8",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x20",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "PublicDescription": "This event counts load operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x40",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "PublicDescription": "This event counts load operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x80",
+ "BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
+ "PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x1",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x2",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 4K page structure.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x4",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 2M/4M page structure.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x8",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks. (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x20",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "PublicDescription": "This event counts store operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x40",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "PublicDescription": "This event counts store operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x80",
+ "BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
+ "PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4f",
+ "UMask": "0x10",
+ "BriefDescription": "Cycle count for an Extended Page table walk.",
+ "Counter": "0,1,2,3",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x1",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Misses in ITLB that causes a page walk of any page size.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x2",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Completed page walks due to misses in ITLB 4K page entries.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x4",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Completed page walks due to misses in ITLB 2M/4M page entries.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x8",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x20",
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "PublicDescription": "ITLB misses that hit STLB (4K).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x40",
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "PublicDescription": "ITLB misses that hit STLB (2M).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xae",
+ "UMask": "0x1",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x11",
+ "BriefDescription": "Number of DTLB page walker hits in the L1+FB",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L1",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L1+FB.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x41",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x81",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x42",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x82",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x44",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L3.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x84",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of DTLB page walker loads from memory.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x28",
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "Errata": "HSD25",
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x48",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBC",
+ "UMask": "0x88",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in memory.",
+ "Counter": "0,1,2,3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_MEMORY",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xBD",
+ "UMask": "0x1",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBD",
+ "UMask": "0x20",
+ "BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Count number of STLB flush attempts.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x60",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x60",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in ITLB of any page size.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x60",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
new file mode 100644
index 000000000000..f1ee6d4853c5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -0,0 +1,1123 @@
+[
+ {
+ "PublicDescription": "Demand Data Read requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFO requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 HW prefetcher requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFOs that miss cache lines.",
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that miss cache lines",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFOs that hit cache lines in M state.",
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in M state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFOs that access cache lines in any state.",
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that access cache lines in any state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not rejected writebacks that missed LLC.",
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D miss oustandings duration in cycles",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Counts the number of lines brought into the L1 data cache.",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding Demand Data Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles in which the L1D is locked.",
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1D is locked",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand data read requests sent to uncore.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand code read requests sent to uncore.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand and prefetch data reads",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with locked access.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load uops.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store uops.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops with L1 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops with L2 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source followed an L1 miss.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops that missed L2, excluding unknown sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source is LLC miss.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was an on-package core cache LLC hit and cross-core snoop missed.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was an on-package LLC hit and cross-core snoop hits.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was an on-package core cache with HitM responses.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Demand Data Read requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFO requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANS.RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache accesses when fetching instructions.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANS.CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Any MLC or LLC HW prefetch accessing L2, including rejects.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 or LLC HW prefetches that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L1D writebacks that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANS.L1D_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 fill requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANS.L2_FILL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 writebacks that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Transactions accessing L2 pipe.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines in I state filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_IN.I",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines in S state filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines in E state filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Clean L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by demand",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Dirty L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Clean L2 cache lines evicted by the MLC prefetcher.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.PF_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Dirty L2 cache lines evicted by the MLC prefetcher.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Dirty L2 cache lines filling the L2.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa",
+ "EventName": "L2_LINES_OUT.DIRTY_ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines filling the L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Split locks in SQ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch code reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all writebacks from the core to the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x18000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses. It also includes L2 hints sent to LLC to keep a line from being evicted out of the core caches",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10400",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts non-temporal stores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data reads ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand rfo's ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x000105B3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch prefetch RFOs ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x000107F7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo references (demand & prefetch) ",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
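The OFFCORE_RESPONSE.* entries above are two-part events: the generic counter is programmed with event 0xB7 (or its alias 0xBB) and umask 0x1, while the request/response filter carried in MSRValue is written to the auxiliary MSR named by MSRIndex (0x1a6 or 0x1a7). A minimal sketch of how such an entry could be turned into a perf event string follows; the file path is a guess, and it presumes the running kernel exposes the offcore_rsp format attribute.

    import json

    # Path is an assumption; the hunk above does not show the file's name.
    PATH = "tools/perf/pmu-events/arch/x86/ivybridge/cache.json"

    with open(PATH) as f:
        events = json.load(f)

    for ev in events:
        if ev.get("Offcore") != "1":
            continue
        # "0xB7, 0xBB" lists two alias event codes; either one selects an
        # OFFCORE_RESPONSE counter, the filter itself goes into MSR 0x1a6/0x1a7.
        code = ev["EventCode"].split(",")[0].strip().lower()
        print("%s -> cpu/event=%s,umask=%s,offcore_rsp=%s/"
              % (ev["EventName"], code, ev["UMask"], ev["MSRValue"]))

For example, the DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE entry above would come out as cpu/event=0xb7,umask=0x1,offcore_rsp=0x10003c0002/, which can be handed to perf stat -e.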
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
new file mode 100644
index 000000000000..950b62c0908e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
@@ -0,0 +1,151 @@
+[
+ {
+ "PublicDescription": "Counts number of X87 uops executed.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts number of SSE* or AVX-128 double precision FP scalar uops executed.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts 256-bit packed single-precision floating-point instructions.",
+ "EventCode": "0x11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "number of GSSE-256 Computational FP single precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts 256-bit packed double-precision floating-point instructions.",
+ "EventCode": "0x11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "number of AVX-256 Computational FP double precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of assists associated with 256-bit AVX store operations.",
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OTHER_ASSISTS.AVX_STORE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of X87 FP assists due to output values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of X87 FP assists due to input values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SIMD FP assists due to output values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SIMD FP assists due to input values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to input values",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1e",
+ "EventName": "FP_ASSIST.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
new file mode 100644
index 000000000000..de72b84b3536
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
@@ -0,0 +1,305 @@
+[
+ {
+ "PublicDescription": "Counts cycles the IDQ is empty.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "IDQ.EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles MITE is delivered at least one uops. Set Cmask = 1.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of uops delivered to IDQ from any path.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes UC accesses.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE.IFETCH_STALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Count issue pipeline slots where no uop was delivered from the front end to the back end when there is no back-end stall.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of DSB to MITE switches.",
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles DSB to MITE switches caused delay.",
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "DSB Fill encountered > 3 DSB lines.",
+ "EventCode": "0xAC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
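Several frontend.json entries above share the same event code and umask and differ only in their modifier attributes: CounterMask, EdgeDetect, Invert and AnyThread correspond to the cmask, edge, inv and any format terms that perf typically exposes for the Intel core PMU. A sketch of that mapping, assuming those format terms are available on the target kernel (FIELD_MAP and to_perf_string are illustrative helpers, not part of this patch):

    import json

    # Illustrative mapping from the JSON attributes used above onto perf format terms.
    FIELD_MAP = {
        "EventCode": "event",
        "UMask": "umask",
        "CounterMask": "cmask",
        "EdgeDetect": "edge",
        "Invert": "inv",
        "AnyThread": "any",
    }

    def to_perf_string(ev):
        terms = []
        for key, term in FIELD_MAP.items():
            if key in ev:
                val = ev[key].split(",")[0].strip().lower()
                terms.append("%s=%s" % (term, val))
        return "cpu/" + ",".join(terms) + "/"

    with open("tools/perf/pmu-events/arch/x86/ivybridge/frontend.json") as f:
        for ev in json.load(f):
            if ev["EventName"] == "IDQ.MS_SWITCHES":
                # Expected output: cpu/event=0x79,umask=0x30,cmask=1,edge=1/
                print(to_perf_string(ev))

The same helper covers the IDQ_UOPS_NOT_DELIVERED.CYCLES_*_UOP_DELIV.CORE variants, which are all event 0x9C/umask 0x1 and are distinguished purely by cmask.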
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
new file mode 100644
index 000000000000..e1c6a1d4a4d5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
@@ -0,0 +1,236 @@
+[
+ {
+ "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative cache-line split Store-address uops dispatched to L1D.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.LLC_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of any page walk that had a miss in LLC.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 4.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads with latency value being above 4",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 8.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Loads with latency value being above 8",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 16.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Loads with latency value being above 16",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 32.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Loads with latency value being above 32",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 64.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2003",
+ "BriefDescription": "Loads with latency value being above 64",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 128.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1009",
+ "BriefDescription": "Loads with latency value being above 128",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 256.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "503",
+ "BriefDescription": "Loads with latency value being above 256",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 512.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "101",
+ "BriefDescription": "Loads with latency value being above 512",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the LLC and the data returned from dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3004003f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand code reads that miss the LLC and the data returned from dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6004001b3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts LLC replacements",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
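The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above are PEBS-only (counter 3, TakenAlone) and all use event 0xCD/umask 0x1; what varies is the latency threshold in MSRValue, which is loaded into MSR 0x3F6 and is the same quantity perf's ldlat term controls. A small sketch that lists those thresholds next to the suggested sample periods, assuming the file is installed at the path used by this patch:

    import json

    with open("tools/perf/pmu-events/arch/x86/ivybridge/memory.json") as f:
        events = json.load(f)

    for ev in events:
        if ev["EventName"].startswith("MEM_TRANS_RETIRED.LOAD_LATENCY_GT_"):
            threshold = int(ev["MSRValue"], 16)   # value loaded into MSR 0x3F6
            print("%-42s ldlat >= %-4d SampleAfterValue=%s"
                  % (ev["EventName"], threshold, ev["SampleAfterValue"]))

Note how the suggested SampleAfterValue shrinks as the threshold grows, since higher-latency loads are rarer. For actual sampling, the usual form is something like perf record -e cpu/mem-loads,ldlat=32/pp, assuming the mem-loads alias is available on the running kernel.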
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/other.json b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
new file mode 100644
index 000000000000..9c2dd0511a32
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
@@ -0,0 +1,44 @@
+[
+ {
+ "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPL_CYCLES.RING0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "CPL_CYCLES.RING0_TRANS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
new file mode 100644
index 000000000000..2145c28193f7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
@@ -0,0 +1,1307 @@
+[
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired from execution.",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x07",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EdgeDetect": "1",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of flags-merge uops allocated. Such uops adds delay.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of flags-merge uops being allocated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles that the divider is active, includes INT and FP. Set 'edge =1, cmask=1' to count the number of divides.",
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when divider is busy executing divide operations",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Divide operations executed.",
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EdgeDetect": "1",
+ "EventName": "ARITH.FPU_DIV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Divide operations executed",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles the RS is empty for the thread.",
+ "EventCode": "0x5E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Stall cycles due to IQ is full.",
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stall cycles because IQ is full",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not taken macro-conditional branches.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken macro-conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x90",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired direct near calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired indirect calls.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc2",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc8",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xd0",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired direct near calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles Allocation is stalled due to Resource Related reason.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles stalled due to no store buffers available (not including draining form sync).",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with pending L2 miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending memory loads.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Total execution stalls.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of loads missed L2.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls due to L2 cache misses.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls due to memory subsystem.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls due to L1 data cache misses",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of uops executed on the core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of instructions at retirement.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "CounterHTOff": "1"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of micro-ops retired, Use cmask=1 and invert to count active cycles or stalled cycles.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Actually retired uops. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of retirement slots used each cycle.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of self-modifying-code machine clears detected.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of executed AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of near return instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of near taken branches retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of far branches retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Far branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Mispredicted taken branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+        "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+        "BriefDescription": "Counts the total number of times the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Reference cycles when at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Reference cycles when at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
new file mode 100644
index 000000000000..f036f5398906
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
@@ -0,0 +1,180 @@
+[
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "DTLB_LOAD_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Page walk for a large page completed for Demand load.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Miss in all TLB levels causes a page walk that completes of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles PMH is busy with this walk.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts load operations that missed 1st level DTLB but hit the 2nd level.",
+ "EventCode": "0x5F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all ITLB levels that cause page walks.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycle PMH is busy with a walk.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks in ITLB due to STLB load misses for large pages.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "ITLB_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Completed page walks in ITLB due to STLB load misses for large pages",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Count number of STLB flush attempts.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "STLB flush attempts",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+        "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/cache.json b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
new file mode 100644
index 000000000000..ff27a620edd8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
@@ -0,0 +1,1272 @@
+[
+ {
+ "PublicDescription": "Demand Data Read requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFO requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all L2 HW prefetcher requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFOs that miss cache lines.",
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that miss cache lines",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFOs that hit cache lines in M state.",
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in M state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFOs that access cache lines in any state.",
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that access cache lines in any state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not rejected writebacks that missed LLC.",
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "L1D miss outstanding duration in cycles",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Counts the number of lines brought into the L1 data cache.",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding Demand Data Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles in which the L1D is locked.",
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1D is locked",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand data read requests sent to uncore.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand code read requests sent to uncore.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "SampleAfterValue": "100003",
+        "BriefDescription": "Cacheable and noncacheable code read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand and prefetch data reads",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with locked access.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load uops.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store uops.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops with L1 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops with L2 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source followed an L1 miss.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops that missed L2, excluding unknown sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source is LLC miss.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was an on-package core cache LLC hit and cross-core snoop missed.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was an on-package LLC hit and cross-core snoop hits.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was an on-package core cache with HitM responses.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops whose data source was LLC hit with no snoop required.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops whose data source was remote DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Remote cache HITM.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Data forwarded from remote cache.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Demand Data Read requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFO requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANS.RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache accesses when fetching instructions.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANS.CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Any MLC or LLC HW prefetch accessing L2, including rejects.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 or LLC HW prefetches that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L1D writebacks that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANS.L1D_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 fill requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANS.L2_FILL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 writebacks that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Transactions accessing L2 pipe.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines in I state filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_IN.I",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines in S state filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines in E state filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache lines filling L2.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Clean L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by demand",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Dirty L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Clean L2 cache lines evicted by the MLC prefetcher.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.PF_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Dirty L2 cache lines evicted by the MLC prefetcher.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Dirty L2 cache lines filling the L2.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa",
+ "EventName": "L2_LINES_OUT.DIRTY_ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines filling the L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Split locks in SQ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+        "PublicDescription": "Cycles a demand request was blocked due to Fill Buffer unavailability.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Cycles a demand request was blocked due to Fill Buffer unavailability",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that hit the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all writebacks from the core to the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x803c8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x23ffc08000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10400",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts non-temporal stores",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
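
Each OFFCORE_RESPONSE.* entry above pairs the generic off-core response event (EventCode 0xB7/0xBB) with an MSRValue that is programmed into the MSRs named in MSRIndex (0x1a6/0x1a7); the EventName and BriefDescription only label that particular bit combination. A minimal sketch for sanity-checking files in this layout is shown below; the required keys are simply the fields visible in this patch, and the check_events helper and its rules are illustrative, not part of perf's jevents tooling.

    import json
    import sys

    # Illustrative validator for the pmu-events JSON layout used in this patch.
    # The required keys mirror the fields seen above; this is not an official schema.
    REQUIRED = {"EventName", "EventCode", "UMask", "SampleAfterValue", "BriefDescription"}

    def check_events(path):
        with open(path) as f:
            events = json.load(f)              # each file is a flat JSON array of event objects
        for ev in events:
            missing = REQUIRED - ev.keys()
            if missing:
                print(f"{ev.get('EventName', '?')}: missing {sorted(missing)}")
            if ev.get("Offcore") == "1":
                int(ev["MSRValue"], 16)        # off-core entries carry a hex MSR value
            int(ev["SampleAfterValue"])        # sample period is a decimal string

    if __name__ == "__main__":
        check_events(sys.argv[1])
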
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json b/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
new file mode 100644
index 000000000000..950b62c0908e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
@@ -0,0 +1,151 @@
+[
+ {
+ "PublicDescription": "Counts number of X87 uops executed.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts number of SSE* or AVX-128 double precision FP scalar uops executed.",
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts 256-bit packed single-precision floating-point instructions.",
+ "EventCode": "0x11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "number of GSSE-256 Computational FP single precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts 256-bit packed double-precision floating-point instructions.",
+ "EventCode": "0x11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "number of AVX-256 Computational FP double precision uops issued this cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of assists associated with 256-bit AVX store operations.",
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OTHER_ASSISTS.AVX_STORE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of X87 FP assists due to output values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of X87 FP assists due to input values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SIMD FP assists due to output values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SIMD FP assists due to input values.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to input values",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1e",
+ "EventName": "FP_ASSIST.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
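
The FP_COMP_OPS_EXE.* and SIMD_FP_256.* entries above split floating-point uops by width and precision through UMask alone (they share EventCode 0x10 or 0x11). A common use is to weight each count by its vector width to approximate FLOPs; the sketch below does that with assumed per-uop element counts, which are illustrative and not stored in the JSON, and since these events count issued uops the result is only an estimate.

    # Rough FLOP weighting per uop for the floating-point events above. The
    # weights are the usual element counts per operation width (an assumption
    # for illustration, not something encoded in the JSON itself).
    FLOPS_PER_UOP = {
        "FP_COMP_OPS_EXE.X87": 1,
        "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE": 1,
        "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE": 1,
        "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE": 4,
        "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE": 2,
        "SIMD_FP_256.PACKED_SINGLE": 8,
        "SIMD_FP_256.PACKED_DOUBLE": 4,
    }

    def estimated_flops(counts):
        """counts: dict of event name -> raw counter value from a measurement."""
        return sum(FLOPS_PER_UOP[name] * value
                   for name, value in counts.items() if name in FLOPS_PER_UOP)
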
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
new file mode 100644
index 000000000000..de72b84b3536
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
@@ -0,0 +1,305 @@
+[
+ {
+ "PublicDescription": "Counts cycles the IDQ is empty.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "IDQ.EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts cycles MITE is delivered at least one uops. Set Cmask = 1.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of uops delivered to IDQ from any path.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes UC accesses.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE.IFETCH_STALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Count issue pipeline slots where no uop was delivered from the front end to the back end when there is no back-end stall.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of DSB to MITE switches.",
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles DSB to MITE switches caused delay.",
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "DSB Fill encountered > 3 DSB lines.",
+ "EventCode": "0xAC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
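
Several of the frontend entries above reuse the same EventCode 0x79 and UMask and differ only in CounterMask or EdgeDetect: for example IDQ.MS_UOPS, IDQ.MS_CYCLES and IDQ.MS_SWITCHES all use UMask 0x30, so the uop count, cycle count and switch count all come from one underlying event qualified differently. The sketch below groups the file by (EventCode, UMask) to make those families visible; it is an illustrative helper, not part of the perf build. Once the tables are generated into perf, the EventName strings become usable as named events from the command line (something like "perf stat -e idq.ms_switches"; the exact invocation is an assumption about the tooling, not taken from this patch).

    import json
    from collections import defaultdict

    # Group the frontend events by (EventCode, UMask) to show which entries are
    # the same hardware event qualified differently (plain uop count vs.
    # CounterMask-based cycle count vs. EdgeDetect-based occurrence count).
    def variants(path):
        groups = defaultdict(list)
        with open(path) as f:
            for ev in json.load(f):
                groups[(ev["EventCode"], ev["UMask"])].append(ev["EventName"])
        for key, names in sorted(groups.items()):
            if len(names) > 1:
                print(key, "->", ", ".join(names))

    variants("tools/perf/pmu-events/arch/x86/ivytown/frontend.json")
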
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/memory.json b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
new file mode 100644
index 000000000000..437d98f3e344
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
@@ -0,0 +1,503 @@
+[
+ {
+ "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative cache-line split Store-address uops dispatched to L1D.",
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 4.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads with latency value being above 4",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 8.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Loads with latency value being above 8",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 16.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Loads with latency value being above 16",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 32.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Loads with latency value being above 32",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 64.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2003",
+ "BriefDescription": "Loads with latency value being above 64",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 128.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1009",
+ "BriefDescription": "Loads with latency value being above 128",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 256.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "503",
+ "BriefDescription": "Loads with latency value being above 256",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 512.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "101",
+ "BriefDescription": "Loads with latency value being above 512",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc00244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67f800244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from remote dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x87f800244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data forwarded from remote cache",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hits the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc203f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6004003f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x87f8203f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data forwarded from remote cache",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x107fc003f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x600400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67f800004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x87f820004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x107fc00004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67fc00001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x600400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67f800001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x87f820001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x107fc00001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x107fc20002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the LLC and the data is found in M state in remote cache and forwarded from there.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67fc00010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x600400010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67f800010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x87f820010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x107fc00010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that miss in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
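
The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above all share EventCode 0xCD and UMask 0x1; what changes is the MSRValue, the latency threshold written to MSR 0x3F6, and the default SampleAfterValue, which shrinks as the threshold grows because fewer loads qualify. The short sketch below just tabulates that relationship from the file; it is illustrative only.

    import json

    # List the load-latency thresholds (MSRValue, written to MSR 0x3F6) against
    # the default sample period chosen for each event in memory.json above.
    def latency_table(path):
        with open(path) as f:
            events = json.load(f)
        rows = [(int(ev["MSRValue"], 16), int(ev["SampleAfterValue"]))
                for ev in events
                if ev["EventName"].startswith("MEM_TRANS_RETIRED.LOAD_LATENCY_GT_")]
        for threshold, period in sorted(rows):
            print(f"latency > {threshold:>3} cycles: sample period {period}")

    latency_table("tools/perf/pmu-events/arch/x86/ivytown/memory.json")
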
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/other.json b/tools/perf/pmu-events/arch/x86/ivytown/other.json
new file mode 100644
index 000000000000..9c2dd0511a32
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/other.json
@@ -0,0 +1,44 @@
+[
+ {
+ "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPL_CYCLES.RING0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "CPL_CYCLES.RING0_TRANS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
new file mode 100644
index 000000000000..2145c28193f7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
@@ -0,0 +1,1307 @@
+[
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired from execution.",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x07",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EdgeDetect": "1",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of flags-merge uops allocated. Such uops adds delay.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of flags-merge uops being allocated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles that the divider is active, includes INT and FP. Set 'edge =1, cmask=1' to count the number of divides.",
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when divider is busy executing divide operations",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Divide operations executed.",
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EdgeDetect": "1",
+ "EventName": "ARITH.FPU_DIV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Divide operations executed",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x58",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles the RS is empty for the thread.",
+ "EventCode": "0x5E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Stall cycles due to IQ is full.",
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stall cycles because IQ is full",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not taken macro-conditional branches.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken macro-conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x90",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired direct near calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired indirect calls.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc2",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc8",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xd0",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired direct near calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles Allocation is stalled due to Resource Related reason.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles stalled due to no store buffers available (not including draining form sync).",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with pending L2 miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with pending memory loads.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Total execution stalls.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of loads missed L2.",
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls due to L2 cache misses.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls due to memory subsystem.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls due to L1 data cache misses",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of uops executed on the core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of instructions at retirement.",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "CounterHTOff": "1"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of micro-ops retired, Use cmask=1 and invert to count active cycles or stalled cycles.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Actually retired uops. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of retirement slots used each cycle.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of self-modifying-code machine clears detected.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of executed AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Branch instructions at retirement.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of near return instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of near taken branches retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of far branches retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Far branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Mispredicted taken branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count cases of saving new LBR",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
new file mode 100644
index 000000000000..c8de548b78fa
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
@@ -0,0 +1,198 @@
+[
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "DTLB_LOAD_MISSES.DEMAND_LD_WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "DTLB_LOAD_MISSES.DEMAND_LD_WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "DTLB_LOAD_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Page walk for a large page completed for Demand load.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Miss in all TLB levels causes a page walk that completes of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles PMH is busy with this walk.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts load operations that missed 1st level DTLB but hit the 2nd level.",
+ "EventCode": "0x5F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all ITLB levels that cause page walks.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycle PMH is busy with a walk.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Completed page walks in ITLB due to STLB load misses for large pages.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "ITLB_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Completed page walks in ITLB due to STLB load misses for large pages",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Count number of STLB flush attempts.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "STLB flush attempts",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
new file mode 100644
index 000000000000..f723e8f7bb09
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
@@ -0,0 +1,1290 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with locked access.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load uops retired",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load uops.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of store uops retired.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store uops.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Data from remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier. ",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D.ALLOCATED_IN_M",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Allocated L1D data cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1D.EVICTION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L1D.ALL_M_REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D miss oustandings duration in cycles.",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1D is locked.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests sent to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cacheable and noncachaeble code read requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand and prefetch data reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that miss cache lines.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in E state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that access cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_S",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANS.RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANS.CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache accesses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANS.L1D_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L1D writebacks that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANS.L2_FILL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 fill requests that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Transactions accessing L2 pipe.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_IN.I",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in I state filling L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in S state filling L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in E state filling L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines filling L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines evicted by demand.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.PF_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa",
+ "EventName": "L2_LINES_OUT.DIRTY_ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines filling the L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Split locks in SQ.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from L2 hardware prefetchers.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that hit the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all writebacks from the core to the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x803c8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x23ffc08000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10400",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts non-temporal stores",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data reads",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand rfo's",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x000105B3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x000107F7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
new file mode 100644
index 000000000000..982eda48785e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
@@ -0,0 +1,138 @@
+[
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OTHER_ASSISTS.AVX_STORE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to Output values.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to input values.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1e",
+ "EventName": "FP_ASSIST.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
new file mode 100644
index 000000000000..1b7b1dd36c68
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
@@ -0,0 +1,305 @@
+[
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "IDQ.EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DSB_FILL.OTHER_CANCEL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering any Uop.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAC",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa",
+ "EventName": "DSB_FILL.ALL_CANCEL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/memory.json b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
new file mode 100644
index 000000000000..27e636428f4f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
@@ -0,0 +1,422 @@
+[
+ {
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads with latency value being above 4 .",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Loads with latency value being above 8.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Loads with latency value being above 16.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Loads with latency value being above 32.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2003",
+ "BriefDescription": "Loads with latency value being above 64.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1009",
+ "BriefDescription": "Loads with latency value being above 128.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "503",
+ "BriefDescription": "Loads with latency value being above 256.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "101",
+ "BriefDescription": "Loads with latency value being above 512.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x600400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67f800004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x87f820004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x107fc00004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67fc00001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x600400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67f800001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x87f820001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x107fc00001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67fc00010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x600400010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x67f800010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x87f820010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x107fc00010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fffc20080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x600400077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all local dram accesses for all demand and L2 prefetches. LLC prefetches are excluded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3FFFC20077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts all LLC misses for all demand and L2 prefetches. LLC prefetches are excluded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x187FC20077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.REMOTE_HITM_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts all remote cache-to-cache transfers (includes HITM and HIT-Forward) for all demand and L2 prefetches. LLC prefetches are excluded.",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
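The OFFCORE_RESPONSE.* entries that close this file are not ordinary events: EventCode 0xB7/0xBB only selects the off-core response facility, and the MSRIndex/MSRValue pair tells the tooling which off-core response MSR (0x1a6 or 0x1a7) to program and with what request/response mask. A minimal sketch of inspecting that pairing offline, assuming the JSON above is saved in a local tree (the path below is illustrative, not part of this patch):

import json

# Illustrative path; point this at the jaketown JSON file ending above.
PATH = "tools/perf/pmu-events/arch/x86/jaketown/memory.json"

with open(PATH) as f:
    events = json.load(f)

# Print every off-core response event together with the MSR programming it
# needs (MSRIndex selects MSR 0x1a6/0x1a7, MSRValue is the mask written to it).
for ev in events:
    if ev.get("Offcore") == "1":
        print(f'{ev["EventName"]:60s} msr={ev["MSRIndex"]} value={ev["MSRValue"]}')

The same Offcore/MSRIndex/MSRValue convention recurs in the knightslanding files further down in this diff.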
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/other.json b/tools/perf/pmu-events/arch/x86/jaketown/other.json
new file mode 100644
index 000000000000..64b195b82c50
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/other.json
@@ -0,0 +1,58 @@
+[
+ {
+ "EventCode": "0x17",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Valid instructions written to IQ per cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPL_CYCLES.RING0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "CPL_CYCLES.RING0_TRANS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HW_PRE_REQ.DL1_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
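Every entry in these per-family files is a flat JSON object with the same core fields (EventCode, UMask, EventName, SampleAfterValue, BriefDescription) plus optional modifiers such as CounterMask, EdgeDetect, Invert, AnyThread, PEBS and Offcore. A small sanity-check sketch over that shape, assuming the files are readable from a local tree (the glob and the required-field list below are assumptions for illustration, not something the perf build runs):

import glob
import json

REQUIRED = {"EventCode", "UMask", "EventName", "SampleAfterValue", "BriefDescription"}

# Assumed location of the files added by this patch; adjust as needed.
for path in sorted(glob.glob("tools/perf/pmu-events/arch/x86/jaketown/*.json")):
    with open(path) as f:
        events = json.load(f)
    for ev in events:
        missing = REQUIRED - ev.keys()
        if missing:
            print(f'{path}: {ev.get("EventName", "<unnamed>")} is missing {sorted(missing)}')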
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
new file mode 100644
index 000000000000..8a597e45ed84
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
@@ -0,0 +1,1220 @@
+[
+ {
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired from execution.",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken macro-conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x90",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc2",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc8",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xd0",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x90",
+ "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xd0",
+ "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stall cycles because IQ is full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Multiply packed/scalar single precision uops allocated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RESOURCE_STALLS.LB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5B",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "RESOURCE_STALLS2.BOB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x5E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count cases of saving new LBR.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of micro-ops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Actually retired uops.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Far branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted not taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired instructions experiencing ITLB misses.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when divider is busy executing divide operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of the divide operations executed.",
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "ARITH.FPU_DIV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Divide operations executed.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops dispatched per thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops dispatched from any thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "2",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "6",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
+ "EventCode": "0x07",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x07",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "AGU_BYPASS_CANCEL.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
+ "TakenAlone": "1",
+ "CounterHTOff": "1"
+ },
+ {
+ "EventCode": "0x5B",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls2 control structures full for physical registers.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5B",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with either free list is empty.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "RESOURCE_STALLS.MEM_RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "RESOURCE_STALLS.OOO_RSRC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5B",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "RESOURCE_STALLS2.OOO_RSRC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls out of order resources full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa",
+ "EventName": "RESOURCE_STALLS.LB_SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.",
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EdgeDetect": "1",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
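The fixed-counter and retirement events defined above are the raw inputs for the usual derived ratios: instructions per cycle from INST_RETIRED.ANY over CPU_CLK_UNHALTED.THREAD, and branch misprediction rate from BR_MISP_RETIRED.ALL_BRANCHES over BR_INST_RETIRED.ALL_BRANCHES. A small sketch of that arithmetic, with made-up counts standing in for values a real collection run would supply:

def ipc(inst_retired_any: int, cpu_clk_unhalted_thread: int) -> float:
    """Instructions retired per unhalted core cycle."""
    return inst_retired_any / cpu_clk_unhalted_thread

def branch_mispredict_rate(br_misp_retired_all: int, br_inst_retired_all: int) -> float:
    """Fraction of retired macro branches that were mispredicted."""
    return br_misp_retired_all / br_inst_retired_all

# Illustrative counts only; real values would come from counting the events
# defined in this file over the same measurement interval.
print(ipc(2_400_000_000, 1_600_000_000))               # -> 1.5
print(branch_mispredict_rate(3_000_000, 400_000_000))  # -> 0.0075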
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
new file mode 100644
index 000000000000..a654ab771fce
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
@@ -0,0 +1,149 @@
+[
+ {
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "STLB flush attempts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
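The *_MISSES.WALK_DURATION events above count cycles the page miss handler spends walking, so dividing their sum by unhalted core cycles gives a rough TLB-walk overhead for the measured interval. A minimal sketch of that calculation with illustrative numbers (the helper name and the counts are assumptions, not part of the patch):

def page_walk_fraction(itlb_walk_cycles: int, dtlb_load_walk_cycles: int,
                       dtlb_store_walk_cycles: int, unhalted_cycles: int) -> float:
    """Approximate fraction of core cycles spent in hardware page walks."""
    walks = itlb_walk_cycles + dtlb_load_walk_cycles + dtlb_store_walk_cycles
    return walks / unhalted_cycles

# Illustrative counts only (ITLB_MISSES.WALK_DURATION, DTLB_LOAD_MISSES.WALK_DURATION,
# DTLB_STORE_MISSES.WALK_DURATION, CPU_CLK_UNHALTED.THREAD).
print(page_walk_fraction(5_000_000, 30_000_000, 8_000_000, 1_600_000_000))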
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
new file mode 100644
index 000000000000..88ba5994b994
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
@@ -0,0 +1,2305 @@
+[
+ {
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "L2_REQUESTS_REJECT.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of MEC requests from the L2Q that reference a cache line (cacheable requests) exlcuding SW prefetches filling only to L2 cache and L1 evictions (automatically exlcudes L2HWP, UC, WC) that were rejected - Multiple repeated rejects should be counted multiple times"
+ },
+ {
+ "EventCode": "0x31",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "CORE_REJECT_L2Q.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of MEC requests that were not accepted into the L2Q because of any L2 queue reject condition. There is no concept of at-ret here. It might include requests due to instructions in the speculative path."
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "L2_REQUESTS.REFERENCE",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the total number of L2 cache references."
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "L2_REQUESTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of L2 cache misses"
+ },
+ {
+ "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses. ",
+ "EventCode": "0x86",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses. "
+ },
+ {
+ "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted. ",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MEM_UOPS_RETIRED.L1_MISS_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of load micro-ops retired that miss in L1 D cache"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "MEM_UOPS_RETIRED.L2_HIT_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of load micro-ops retired that hit in the L2",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "MEM_UOPS_RETIRED.L2_MISS_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the number of load micro-ops retired that miss in the L2",
+ "Data_LA": "1"
+ },
+ {
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "MEM_UOPS_RETIRED.UTLB_MISS_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of load micro-ops retired that caused micro TLB miss"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "MEM_UOPS_RETIRED.HITM",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the loads retired that get the data from the other core in the same tile in M state",
+ "Data_LA": "1"
+ },
+ {
+ "PublicDescription": "This event counts the number of load micro-ops retired.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts all the load micro-ops retired"
+ },
+ {
+ "PublicDescription": "This event counts the number of store micro-ops retired.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x80",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts all the store micro-ops retired"
+ },
+ {
+ "EventCode": "0xB7",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts the matrix events specified by MSR_OFFCORE_RESPx"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x40000032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x10004032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x08004032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x10000832f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x08000832f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x00000132f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000003091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000403091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800403091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000083091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800083091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000008000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000408000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800408000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000088000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800088000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000018000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000014800 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts all streaming stores (WC and should be programmed on PMC1) that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000014000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial streaming stores (WC and should be programmed on PMC1) that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000002000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000402000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800402000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000082000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800082000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000012000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000001000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000401000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800401000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000081000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800081000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000011000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010800 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Full streaming stores (WC and should be programmed on PMC1) that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000020020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that provides no supplier details",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000400001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800400001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000080001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0800080001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for any response",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002001000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002002000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002008000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002003091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x00020032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0002000070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004001000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004002000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004008000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004003091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x00040032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0004000070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008001000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008002000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008008000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008003091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0008000044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x00080032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010001000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010002000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010008000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010003091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x00100032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0010000070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800181000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800182000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800188000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800183091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x18001832f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800180070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800400002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800400004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800400040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800400080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800400100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800400400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800401000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800402000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800408000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800403091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800400022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800400044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x18004032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1800400070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json b/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
new file mode 100644
index 000000000000..6d38636689a4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
@@ -0,0 +1,34 @@
+[
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts all instruction fetches, including uncacheable fetches."
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts all instruction fetches that hit the instruction cache."
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts all instruction fetches that miss the instruction cache or produce memory requests. An instruction fetch miss is counted only once and not once for every cycle it is outstanding."
+ },
+ {
+ "EventCode": "0xE7",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MS_DECODED.MS_ENTRY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of times the MSROM starts a flow of uops."
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
new file mode 100644
index 000000000000..700652566200
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
@@ -0,0 +1,1110 @@
+[
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of times the machine clears due to memory ordering hazards"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x01004032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x00802032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x01010032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x00808032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100403091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080203091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101003091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080803091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100408000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080208000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101008000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080808000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100402000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080202000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101002000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080802000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100401000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080201000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101001000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080801000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_FAR",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_NEAR",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_FAR",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_NEAR",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000020080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from any NON_DRAM system address. This includes MMIO transactions",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2000020020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from any NON_DRAM system address. This includes MMIO transactions",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0100400001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080200001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0101000001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far. ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080800001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Local.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600100 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM",
+ "MSRIndex": "0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180601000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180608000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180603091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x01806032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0180600070 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from MCDRAM (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800001 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800002 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800004 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800020 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800040 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800080 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800200 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800400 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181801000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181802000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181808000 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181803091 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800022 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0181800044 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x01818032f7 ",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any Read request that accounts for responses from DDR (local and far)",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
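Note (illustrative, not part of this patch): the OFFCORE_RESPONSE.* entries above all share EventCode 0xB7 and UMask 0x1 and differ only in the MSRValue that is written to the off-core response MSRs named by MSRIndex (0x1a6/0x1a7). As a rough sketch of what such an entry means at the hardware-programming level, the same event can be requested through the raw perf_event_open() interface by placing the event/umask encoding in attr.config and the MSRValue in attr.config1; the numbers below are copied from the OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM entry, and the wrapper and workload are placeholders, not how perf's own tooling consumes these files.

/* Illustrative sketch only: count off-core responses served from MCDRAM. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;		/* UMask 0x1 << 8 | EventCode 0xB7 */
	attr.config1 = 0x0180600001ULL;	/* MSRValue of DEMAND_DATA_RD.MCDRAM */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload being measured here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("responses from MCDRAM: %lld\n", count);
	close(fd);
	return 0;
}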
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
new file mode 100644
index 000000000000..bb5494cfb5ae
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
@@ -0,0 +1,435 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x7e",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xfe",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps and predicted taken."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xf9",
+ "EventName": "BR_INST_RETIRED.CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of near CALL branch instructions retired."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xfd",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xfb",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xf7",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of near RET branch instructions retired."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xeb",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of branch instructions retired that were near indirect CALL or near indirect JMP."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xbf",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of far branch instructions retired."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0x7e",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xfe",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps and predicted taken."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xfb",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xf7",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xeb",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired that were near indirect CALL or near indirect JMP."
+ },
+ {
+ "PublicDescription": "This event counts the number of micro-ops retired that were supplied from MSROM.",
+ "EventCode": "0xC2",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.MS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of micro-ops retired that are from the complex flows issued by the micro-sequencer (MS)."
+ },
+ {
+ "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. ",
+ "EventCode": "0xC2",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of micro-ops retired"
+ },
+ {
+ "PublicDescription": "This event counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops retired (floating point, integer and store) except for loads (memory-to-register mov-type micro ops), division, sqrt.",
+ "EventCode": "0xC2",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "UOPS_RETIRED.SCALAR_SIMD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops retired. More specifically, it counts scalar SSE, AVX, AVX2, AVX-512 micro-ops except for loads (memory-to-register mov-type micro ops), division, sqrt."
+ },
+ {
+ "PublicDescription": "This event counts the number of packed vector SSE, AVX, AVX2, and AVX-512 micro-ops retired (floating point, integer and store) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies.",
+ "EventCode": "0xC2",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "UOPS_RETIRED.PACKED_SIMD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of vector SSE, AVX, AVX2, AVX-512 micro-ops retired. More specifically, it counts packed SSE, AVX, AVX2, AVX-512 micro-ops (both floating point and integer) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies."
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of times that the machine clears due to program modifying data within 1K of a recently fetched code page"
+ },
+ {
+ "PublicDescription": "This event counts the number of times that the pipeline stalled due to FP operations needing assists.",
+ "EventCode": "0xC3",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of floating operations retired that required microcode assists"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "MACHINE_CLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts all nukes"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "NO_ALLOC_CYCLES.ROB_FULL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and the ROB is full"
+ },
+ {
+ "PublicDescription": "This event counts the number of core cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted branch to retire.",
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and the alloc pipe is stalled waiting for a mispredicted branch to retire."
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "NO_ALLOC_CYCLES.RAT_STALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted. "
+ },
+ {
+ "PublicDescription": "This event counts the number of core cycles when no uops are allocated, the instruction queue is empty and the alloc pipe is stalled waiting for instructions to be fetched.",
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x90",
+ "EventName": "NO_ALLOC_CYCLES.NOT_DELIVERED",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated, the IQ is empty, and no other condition is blocking allocation."
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x7f",
+ "EventName": "NO_ALLOC_CYCLES.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the total number of core cycles when no micro-ops are allocated for any reason."
+ },
+ {
+ "EventCode": "0xCB",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "RS_FULL_STALL.MEC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of core cycles when allocation pipeline is stalled and is waiting for a free MEC reservation station entry."
+ },
+ {
+ "EventCode": "0xCB",
+ "Counter": "0,1",
+ "UMask": "0x1f",
+ "EventName": "RS_FULL_STALL.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full. "
+ },
+ {
+ "EventCode": "0xC0",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the total number of instructions retired"
+ },
+ {
+ "PublicDescription": "This event counts cycles when the divider is busy. More specifically cycles when the divide unit is unable to accept a new divide uop because it is busy processing a previously dispatched uop. The cycles will be counted irrespective of whether or not another divide uop is waiting to enter the divide unit (from the RS). This event counts integer divides, x87 divides, divss, divsd, sqrtss, sqrtsd event and does not count vector divides.",
+ "EventCode": "0xCD",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider. "
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of unhalted core clock cycles"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of unhalted reference clock cycles"
+ },
+ {
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter\r\n",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "BACLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of times the front end resteers for any branch as a result of another branch handling mechanism in the front end."
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "BACLEARS.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of times the front end resteers for RET branches as a result of another branch handling mechanism in the front end."
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "BACLEARS.COND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of times the front end resteers for conditional branches as a result of another branch handling mechanism in the front end."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "RECYCLEQ.LD_BLOCK_ST_FORWARD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store ",
+ "Data_LA": "1"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "RECYCLEQ.LD_BLOCK_STD_NOTREADY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address overlaps with a store whose data is not ready"
+ },
+ {
+ "PublicDescription": "This event counts the number of retired store that experienced a cache line boundary split(Precise Event). Note that each spilt should be counted only once.",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "RECYCLEQ.ST_SPLITS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of occurences a retired store that is a cache line split. Each split should be counted only once."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "RECYCLEQ.LD_SPLITS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of occurences a retired load that is a cache line split. Each split should be counted only once.",
+ "Data_LA": "1"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "RECYCLEQ.LOCK",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts all the retired locked loads. It does not include stores because we would double count if we count stores"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "RECYCLEQ.STA_FULL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the store micro-ops retired that were pushed in the rehad queue because the store address buffer is full"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "RECYCLEQ.ANY_LD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts any retired load that was pushed into the recycle queue for any reason."
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x80",
+ "EventName": "RECYCLEQ.ANY_ST",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts any retired store that was pushed into the recycle queue for any reason."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xf9",
+ "EventName": "BR_MISP_RETIRED.CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted near CALL branch instructions retired."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xfd",
+ "EventName": "BR_MISP_RETIRED.REL_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted near relative CALL branch instructions retired."
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xbf",
+ "EventName": "BR_MISP_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted far branch instructions retired."
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
new file mode 100644
index 000000000000..f31594507f8c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
@@ -0,0 +1,65 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of load micro-ops retired that cause a DTLB miss",
+ "Data_LA": "1"
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_WALKS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total D-side page walks that are completed or started. The page walks started in the speculative path will also be counted",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the total number of core cycles for all the D-side page walks. The cycles for page walks started in speculative path will also be included."
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_WALKS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total I-side page walks that are completed.",
+ "EdgeDetect": "1"
+ },
+ {
+ "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress. ",
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the total number of core cycles for all the I-side page walks. The cycles for page walks started in speculative path will also be included."
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.WALKS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total page walks that are completed (I-side and D-side)",
+ "EdgeDetect": "1"
+ },
+ {
+ "PublicDescription": "This event counts every cycle when a data (D) page walk or instruction (I) page walk is in progress.",
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the total number of core cycles for all the page walks. The cycles for page walks started in speculative path will also be included."
+ }
+] \ No newline at end of file
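Note (illustrative, not part of this patch): each JSON entry above ultimately becomes a named perf event built from term=value pairs (event, umask, period, edge detect, and so on). The fragment below only sketches that mapping, using the PAGE_WALKS.D_SIDE_WALKS fields; the real conversion is done by the pmu-events build tooling, and the exact set and spelling of the terms it emits is its own business.

#include <stdio.h>

/* Illustrative only: fields lifted from one JSON entry above. */
struct json_event {
	unsigned int event;	/* EventCode */
	unsigned int umask;	/* UMask */
	unsigned long period;	/* SampleAfterValue */
	int edge;		/* EdgeDetect */
};

static void print_alias(const char *name, const struct json_event *e)
{
	printf("%s: event=%#x,umask=%#x,period=%lu%s\n",
	       name, e->event, e->umask, e->period,
	       e->edge ? ",edge=1" : "");
}

int main(void)
{
	struct json_event d_side_walks = {
		.event = 0x05, .umask = 0x1, .period = 100003, .edge = 1,
	};

	print_alias("page_walks.d_side_walks", &d_side_walks);
	return 0;
}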
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
new file mode 100644
index 000000000000..12181bb1da2a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -0,0 +1,35 @@
+Family-model,Version,Filename,EventType
+GenuineIntel-6-56,v5,broadwellde,core
+GenuineIntel-6-3D,v17,broadwell,core
+GenuineIntel-6-47,v17,broadwell,core
+GenuineIntel-6-4F,v10,broadwellx,core
+GenuineIntel-6-1C,v4,bonnell,core
+GenuineIntel-6-26,v4,bonnell,core
+GenuineIntel-6-27,v4,bonnell,core
+GenuineIntel-6-36,v4,bonnell,core
+GenuineIntel-6-35,v4,bonnell,core
+GenuineIntel-6-5C,v8,goldmont,core
+GenuineIntel-6-3C,v24,haswell,core
+GenuineIntel-6-45,v24,haswell,core
+GenuineIntel-6-46,v24,haswell,core
+GenuineIntel-6-3F,v17,haswellx,core
+GenuineIntel-6-3A,v18,ivybridge,core
+GenuineIntel-6-3E,v19,ivytown,core
+GenuineIntel-6-2D,v20,jaketown,core
+GenuineIntel-6-57,v9,knightslanding,core
+GenuineIntel-6-1E,v2,nehalemep,core
+GenuineIntel-6-1F,v2,nehalemep,core
+GenuineIntel-6-1A,v2,nehalemep,core
+GenuineIntel-6-2E,v2,nehalemex,core
+GenuineIntel-6-4E,v24,skylake,core
+GenuineIntel-6-5E,v24,skylake,core
+GenuineIntel-6-8E,v24,skylake,core
+GenuineIntel-6-9E,v24,skylake,core
+GenuineIntel-6-37,v13,silvermont,core
+GenuineIntel-6-4D,v13,silvermont,core
+GenuineIntel-6-4C,v13,silvermont,core
+GenuineIntel-6-2A,v15,sandybridge,core
+GenuineIntel-6-2C,v2,westmereep-dp,core
+GenuineIntel-6-2C,v2,westmereep-dp,core
+GenuineIntel-6-25,v2,westmereep-sp,core
+GenuineIntel-6-2F,v2,westmereex,core
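Note (illustrative, not part of this patch): mapfile.csv keys each event-file directory by a Family-model string such as GenuineIntel-6-57 (Knights Landing above). As a small sketch, assuming the cpuid string has already been built and ignoring the Version and EventType columns, the lookup reduces to a first-column match; the actual matching lives in perf's pmu-events code and may treat the field as a pattern rather than a literal string.

#include <stdio.h>
#include <string.h>

/* Illustrative sketch only: map a cpuid string to its event-file directory. */
static int lookup_events_dir(const char *cpuid, char *dir, size_t len)
{
	char line[256];
	FILE *f = fopen("tools/perf/pmu-events/arch/x86/mapfile.csv", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		char *family = strtok(line, ",");

		strtok(NULL, ",");		/* Version column, unused here */
		if (family && !strcmp(family, cpuid)) {
			char *file = strtok(NULL, ",");

			if (file)
				snprintf(dir, len, "%s", file);
			fclose(f);
			return file ? 0 : -1;
		}
	}
	fclose(f);
	return -1;
}

int main(void)
{
	char dir[64];

	if (!lookup_events_dir("GenuineIntel-6-57", dir, sizeof(dir)))
		printf("event files: arch/x86/%s/*.json\n", dir);
	return 0;
}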
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
new file mode 100644
index 000000000000..a11029efda2f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
@@ -0,0 +1,3229 @@
+[
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "CACHE_LOCK_CYCLES.L1D",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D locked"
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CACHE_LOCK_CYCLES.L1D_L2",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D and L2 locked"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D.M_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines replaced in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D.M_REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines allocated in the M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D.M_SNOOP_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D snoop eviction of cache lines in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D.REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache lines allocated"
+ },
+ {
+ "EventCode": "0x43",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_ALL_REF.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All references to the L1 data cache"
+ },
+ {
+ "EventCode": "0x43",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_ALL_REF.CACHEABLE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cacheable reads and writes"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_CACHE_LD.E_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache read in E state"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_LD.I_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache read in I state (misses)"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D_CACHE_LD.M_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache read in M state"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0xf",
+ "EventName": "L1D_CACHE_LD.MESI",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache reads"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_CACHE_LD.S_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache read in S state"
+ },
+ {
+ "EventCode": "0x42",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_CACHE_LOCK.E_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache load locks in E state"
+ },
+ {
+ "EventCode": "0x42",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_LOCK.HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache load lock hits"
+ },
+ {
+ "EventCode": "0x42",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D_CACHE_LOCK.M_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache load locks in M state"
+ },
+ {
+ "EventCode": "0x42",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_CACHE_LOCK.S_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache load locks in S state"
+ },
+ {
+ "EventCode": "0x53",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_LOCK_FB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D load lock accepted in fill buffer"
+ },
+ {
+ "EventCode": "0x52",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D prefetch load lock accepted in fill buffer"
+ },
+ {
+ "EventCode": "0x41",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_CACHE_ST.E_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache stores in E state"
+ },
+ {
+ "EventCode": "0x41",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D_CACHE_ST.M_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache stores in M state"
+ },
+ {
+ "EventCode": "0x41",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_CACHE_ST.S_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache stores in S state"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_PREFETCH.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch misses"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_PREFETCH.REQUESTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_PREFETCH.TRIGGERS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests triggered"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1D_WB_L2.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in E state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D_WB_L2.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in I state (misses)"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L1D_WB_L2.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in M state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L1D_WB_L2.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L1 writebacks to L2"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_WB_L2.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_DATA_RQSTS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data prefetches"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the S state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines alloacated"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the E state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the S state"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_LINES_OUT.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.IFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.IFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.IFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.LD_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_RQSTS.LD_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.LOADS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xaa",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PREFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PREFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.PREFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 prefetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.RFOS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO requests"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANSACTIONS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANSACTIONS.FILL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 fill transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANSACTIONS.IFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANSACTIONS.L1D_WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D writeback to L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANSACTIONS.LOAD",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 Load transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANSACTIONS.PREFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANSACTIONS.RFO",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANSACTIONS.WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 writeback to LLC transactions"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_WRITE.LOCK.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in E state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe0",
+ "EventName": "L2_WRITE.LOCK.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_WRITE.LOCK.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_WRITE.LOCK.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_WRITE.LOCK.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_WRITE.LOCK.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in S state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "L2_WRITE.RFO.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_WRITE.RFO.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_WRITE.RFO.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_WRITE.RFO.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_WRITE.RFO.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in S state"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Longest latency cache miss"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Longest latency cache reference"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_INST_RETIRED.LOADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contains a load (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_INST_RETIRED.STORES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contains a store (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MEM_UNCORE_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Load instructions retired with a data source of local DRAM or locally homed remote hitm (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_UNCORE_RETIRED.OTHER_CORE_L2_HITM",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Load instructions retired that HIT modified data in sibling core (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Load instructions retired remote cache HIT data source (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_UNCORE_RETIRED.REMOTE_DRAM",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Load instructions retired remote DRAM and remote home-remote cache HITM (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "MEM_UNCORE_RETIRED.UNCACHEABLE",
+ "SampleAfterValue": "4000",
+ "BriefDescription": "Load instructions retired IO (Precise Event)"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore L1 data cache writebacks"
+ },
+ {
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_SQ_FULL",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests blocked due to Super Queue full"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue lock splits across a cache line"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "STORE_BLOCKS.AT_RET",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Loads delayed with at-Retirement block code"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "STORE_BLOCKS.L1D_BLOCK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Cacheable loads delayed with L1D block code"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x0",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x400",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100",
+ "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1000",
+ "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5",
+ "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x800",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50",
+ "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "500",
+ "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5000",
+ "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "3",
+ "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50000",
+ "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x1000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20",
+ "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "200",
+ "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x2000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10",
+ "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F11",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF11",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x111",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x211",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x411",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x711",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4711",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F44",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF44",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x144",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x244",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x444",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x744",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4744",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7FFF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFFFF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x80FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x47FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x18FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x38FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x10FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F22",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF22",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x122",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x222",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x422",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x722",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4722",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F08",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF08",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore writebacks",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x108",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x408",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x708",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4708",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F77",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF77",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore code or data read requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x177",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x277",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x477",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x777",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4777",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F33",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any cache_dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF33",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any location",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x133",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+       "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x233",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x433",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x733",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = local cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4733",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = local cache or dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = remote cache or dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+       "BriefDescription": "Offcore data reads, RFO's and prefetches that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F03",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF03",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand data requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x103",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x203",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x403",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x703",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4703",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F01",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF01",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x101",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x201",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x401",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x701",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4701",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F04",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF04",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x104",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x204",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x404",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x704",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4704",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F02",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF02",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x102",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x202",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x402",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x702",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4702",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F80",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF80",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore other requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x180",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x280",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x480",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x780",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4780",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F30",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF30",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch data requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x130",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x230",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x430",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x730",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4730",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F10",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF10",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x110",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x210",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x410",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x710",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4710",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F40",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF40",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x140",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x240",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x440",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x740",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4740",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F20",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF20",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x120",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x220",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x420",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x720",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4720",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F70",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF70",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x170",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x270",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x470",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x770",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4770",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
new file mode 100644
index 000000000000..7d2f71a9dee3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
@@ -0,0 +1,229 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_ASSIST.ALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.INPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.OUTPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_COMP_OPS_EXE.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MMX Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP double precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE and SSE2 FP Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP packed Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP scalar Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP single precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE2 integer Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Computational floating-point operations executed"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "FP_MMX_TRANS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All Floating Point to and from MMX transitions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_MMX_TRANS.TO_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from MMX to Floating Point instructions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_MMX_TRANS.TO_MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from Floating Point to MMX instructions"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_128.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer pack operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_128.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer arithmetic operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_128.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer logical operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_128.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer multiply operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_128.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shift operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_128.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shuffle/move operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_128.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer unpack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_64.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit pack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_64.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit arithmetic operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_64.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit logical operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_64.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit packed multiply operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_64.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shift operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_64.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shuffle/move operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_64.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit unpack operations"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json b/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
new file mode 100644
index 000000000000..e5e21e03444d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
@@ -0,0 +1,26 @@
+[
+ {
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions decoded"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.FUSIONS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused instructions decoded"
+ },
+ {
+ "EventCode": "0x19",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TWO_UOP_INSTS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Two Uop instructions decoded"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/memory.json b/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
new file mode 100644
index 000000000000..f914a4525b65
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
@@ -0,0 +1,739 @@
+[
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x60FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF8FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x40FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x20FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any LLC miss",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the local DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/other.json b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
new file mode 100644
index 000000000000..af0860622445
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
@@ -0,0 +1,210 @@
+[
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Early Branch Prediciton Unit clears"
+ },
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Late Branch Prediction Unit clears"
+ },
+ {
+ "EventCode": "0xE5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch prediction unit missed call or return"
+ },
+ {
+ "EventCode": "0xD5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ES_REG_RENAMES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ES segment renames"
+ },
+ {
+ "EventCode": "0x6C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IO_TRANSACTIONS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "I/O transactions"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch stall cycles"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch hits"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch misses"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I Instruction fetches"
+ },
+ {
+ "EventCode": "0x82",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Large ITLB hit"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "LOAD_DISPATCH.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All loads dispatched"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "LOAD_DISPATCH.MOB",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_DISPATCH.RS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched that bypass the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_DISPATCH.RS_DELAYED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from stage 305"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PARTIAL_ADDRESS_ALIAS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "False dependencies due to partial address aliasing"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All RAT stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Flag stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Partial register stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ROB read port stalls cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Scoreboard stall cycles"
+ },
+ {
+ "EventCode": "0x4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "SB_DRAIN.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All Store buffer stall cycles"
+ },
+ {
+ "EventCode": "0xD4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SEG_RENAME_STALLS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Segment rename stall cycles"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SNOOP_RESPONSE.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HIT to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SNOOP_RESPONSE.HITE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITE to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SNOOP_RESPONSE.HITM",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITM to snoop"
+ },
+ {
+ "EventCode": "0xF6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SQ_FULL_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue full stall cycles"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json b/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
new file mode 100644
index 000000000000..41006ddcd893
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
@@ -0,0 +1,881 @@
+[
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.CYCLES_DIV_BUSY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles the divider is busy"
+ },
+ {
+ "EventCode": "0x14",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.DIV",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Divide Operations executed",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ARITH.MUL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Multiply operations executed"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BACLEAR.BAD_TARGET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEAR asserted with bad target address"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR.CLEAR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEAR asserted, regardless of cause "
+ },
+ {
+ "EventCode": "0xA7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR_FORCE_IQ",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction queue forced BACLEAR"
+ },
+ {
+ "EventCode": "0xE0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch instructions decoded"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_INST_EXEC.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_EXEC.COND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Conditional branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_EXEC.DIRECT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Unconditional branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Unconditional call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_INST_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_INST_EXEC.NON_CALLS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect return branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_EXEC.TAKEN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired conditional branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Retired near call instructions (Precise Event)"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_MISP_EXEC.ANY",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_EXEC.COND",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted conditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_EXEC.DIRECT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted unconditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_MISP_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_MISP_EXEC.NON_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_MISP_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted return branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_MISP_EXEC.TAKEN",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted near retired calls (Precise Event)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reference cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_P",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total CPU cycles",
+ "CounterMask": "2"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "ILD_STALL.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Any Instruction Length Decoder stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction Queue full stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Length Change Prefix stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ILD_STALL.MRU",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stall cycles due to BPU MRU bypass"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "ILD_STALL.REGEN",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Regen stall cycles"
+ },
+ {
+ "EventCode": "0x18",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_DECODED.DEC0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions that must be decoded by decoder 0"
+ },
+ {
+ "EventCode": "0x1E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles instructions are written to the instruction queue"
+ },
+ {
+ "EventCode": "0x17",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions written to instruction queue."
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (fixed counter)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (Programmable counter and Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "INST_RETIRED.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired MMX instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired floating-point operations (Precise Event)"
+ },
+ {
+ "EventCode": "0x4C",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Load operations conflicting with software prefetches"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xA8",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.INACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD_OVERFLOW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loops that can't stream from the instruction queue"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Cycles machine clear asserted"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEM_ORDER",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Self-Modifying Code detected"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "RESOURCE_STALLS.FPCW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "FPU control word write stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RESOURCE_STALLS.LOAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Load buffer stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "RESOURCE_STALLS.MXCSR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MXCSR rename stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "RESOURCE_STALLS.OTHER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Other Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ROB full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reservation Station full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.STORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Store buffer stall cycles"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)"
+ },
+ {
+ "EventCode": "0xDB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOP_UNFUSION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uop unfusions due to FP exceptions"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DECODED.ESP_FOLDING",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer instructions decoded"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DECODED.ESP_SYNC",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer sync operations"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops decoded by Microcode Sequencer",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xD1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DECODED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops are decoded",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on any port (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.PORT0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 0"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 0, 1 or 5"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.PORT1",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT2_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 2 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT234_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 2, 3 or 4"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT3_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 3 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT4_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 4 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED.PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 5"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued on any thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops were issued on either thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_ISSUED.FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Fused Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are being retired",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retirement slots used (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ }
+] \ No newline at end of file
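Each entry in these generated event files follows one flat schema: "EventCode" and "UMask" give the hardware encoding, "EventName" the symbolic name, "SampleAfterValue" the default sampling period, "BriefDescription" the help text, plus optional modifiers such as "CounterMask", "Invert", "AnyThread" and "EdgeDetect". Purely as an illustrative sketch and not part of this patch (the path refers to the nehalemep virtual-memory.json file added in the next diff, and the (umask << 8) | event_select raw layout is the conventional x86 encoding, assumed here rather than defined by these files), the entries can be inspected like this:

    #!/usr/bin/env python3
    # Sketch: load one of the pmu-events JSON files added in this series and
    # print each event's name with a conventional raw x86 encoding.
    import json

    PATH = "tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json"

    with open(PATH) as f:
        events = json.load(f)

    for ev in events:
        # Conventional x86 perfmon layout: umask in bits 8-15, event select in bits 0-7.
        raw = (int(ev["UMask"], 16) << 8) | int(ev["EventCode"], 16)
        mods = [k for k in ("CounterMask", "Invert", "AnyThread", "EdgeDetect") if k in ev]
        print(f'{ev["EventName"]:45} raw=0x{raw:04x} mods={",".join(mods) or "-"}')
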
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
new file mode 100644
index 000000000000..0596094e0ee9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
@@ -0,0 +1,109 @@
+[
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_LOAD_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load misses"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_LOAD_MISSES.PDE_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss caused by low part of address"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "DTLB second level hit"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss page walks complete"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB misses"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB first level misses but second level hit"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB miss page walks"
+ },
+ {
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_FLUSH",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ITLB flushes"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISS_RETIRED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss page walks"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss the DTLB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_STORE_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired stores that miss the DTLB (Precise Event)"
+ }
+] \ No newline at end of file
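The same layout carries over to the nehalemex cache.json file added below, where the OFFCORE_RESPONSE.* entries additionally set "Offcore": "1" and select the response type through the MSRIndex/MSRValue pair (MSR 0x1A6). A quick consistency check over the new files, offered only as an illustrative sketch (the choice of "required" keys is an assumption for illustration, not something these files themselves define):

    #!/usr/bin/env python3
    # Sketch: sanity-check the generated event files for the fields used above.
    import glob
    import json

    REQUIRED = ("EventCode", "Counter", "UMask", "EventName",
                "SampleAfterValue", "BriefDescription")

    for path in glob.glob("tools/perf/pmu-events/arch/x86/nehaleme*/*.json"):
        with open(path) as f:
            events = json.load(f)
        for ev in events:
            missing = [k for k in REQUIRED if k not in ev]
            if missing:
                print(f"{path}: {ev.get('EventName', '?')} missing {missing}")
            # Offcore response events (see cache.json below) select the
            # response type through an extra MSR pair.
            if ev.get("Offcore") == "1" and not ("MSRIndex" in ev and "MSRValue" in ev):
                print(f"{path}: {ev['EventName']} marked Offcore without MSR pair")
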
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
new file mode 100644
index 000000000000..21a0f8fd057e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
@@ -0,0 +1,3184 @@
+[
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "CACHE_LOCK_CYCLES.L1D",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D locked"
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CACHE_LOCK_CYCLES.L1D_L2",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D and L2 locked"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D.M_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines replaced in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D.M_REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines allocated in the M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D.M_SNOOP_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D snoop eviction of cache lines in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D.REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache lines allocated"
+ },
+ {
+ "EventCode": "0x43",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_ALL_REF.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All references to the L1 data cache"
+ },
+ {
+ "EventCode": "0x43",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_ALL_REF.CACHEABLE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cacheable reads and writes"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_CACHE_LD.E_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache read in E state"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_LD.I_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache read in I state (misses)"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D_CACHE_LD.M_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache read in M state"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0xf",
+ "EventName": "L1D_CACHE_LD.MESI",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache reads"
+ },
+ {
+ "EventCode": "0x40",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_CACHE_LD.S_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache read in S state"
+ },
+ {
+ "EventCode": "0x42",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_CACHE_LOCK.E_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache load locks in E state"
+ },
+ {
+ "EventCode": "0x42",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_LOCK.HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache load lock hits"
+ },
+ {
+ "EventCode": "0x42",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D_CACHE_LOCK.M_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache load locks in M state"
+ },
+ {
+ "EventCode": "0x42",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_CACHE_LOCK.S_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache load locks in S state"
+ },
+ {
+ "EventCode": "0x53",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_LOCK_FB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D load lock accepted in fill buffer"
+ },
+ {
+ "EventCode": "0x52",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D prefetch load lock accepted in fill buffer"
+ },
+ {
+ "EventCode": "0x41",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_CACHE_ST.E_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache stores in E state"
+ },
+ {
+ "EventCode": "0x41",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D_CACHE_ST.M_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache stores in M state"
+ },
+ {
+ "EventCode": "0x41",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_CACHE_ST.S_STATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache stores in S state"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_PREFETCH.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch misses"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_PREFETCH.REQUESTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_PREFETCH.TRIGGERS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests triggered"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1D_WB_L2.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in E state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D_WB_L2.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in I state (misses)"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L1D_WB_L2.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in M state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L1D_WB_L2.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L1 writebacks to L2"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_WB_L2.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_DATA_RQSTS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data prefetches"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the S state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the E state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the S state"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_LINES_OUT.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.IFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.IFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.IFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.LD_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_RQSTS.LD_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.LOADS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xaa",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PREFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PREFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.PREFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 prefetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.RFOS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO requests"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANSACTIONS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANSACTIONS.FILL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 fill transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANSACTIONS.IFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANSACTIONS.L1D_WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D writeback to L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANSACTIONS.LOAD",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 Load transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANSACTIONS.PREFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANSACTIONS.RFO",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANSACTIONS.WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 writeback to LLC transactions"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_WRITE.LOCK.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in E state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe0",
+ "EventName": "L2_WRITE.LOCK.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_WRITE.LOCK.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_WRITE.LOCK.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_WRITE.LOCK.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_WRITE.LOCK.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in S state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "L2_WRITE.RFO.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_WRITE.RFO.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_WRITE.RFO.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_WRITE.RFO.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_WRITE.RFO.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in S state"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Longest latency cache miss"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Longest latency cache reference"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_INST_RETIRED.LOADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contain a load (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_INST_RETIRED.STORES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contain a store (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss L1D and hit a previously allocated LFB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore L1 data cache writebacks"
+ },
+ {
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_SQ_FULL",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests blocked due to Super Queue full"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue lock splits across a cache line"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "STORE_BLOCKS.AT_RET",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Loads delayed with at-Retirement block code"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "STORE_BLOCKS.L1D_BLOCK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Cacheable loads delayed with L1D block code"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x0",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x400",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100",
+ "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1000",
+ "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5",
+ "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x800",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50",
+ "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "500",
+ "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5000",
+ "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "3",
+ "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50000",
+ "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x1000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20",
+ "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "200",
+ "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x2000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10",
+ "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F11",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF11",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x111",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x211",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x411",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x711",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4711",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F44",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF44",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x144",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x244",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x444",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x744",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4744",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7FFF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFFFF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x80FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x47FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x18FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x38FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x10FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F22",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF22",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x122",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x222",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x422",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x722",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4722",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F08",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF08",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore writebacks",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x108",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x408",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x708",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4708",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F77",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF77",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore code or data read requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x177",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x277",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x477",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x777",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4777",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F33",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any cache_dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF33",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any location",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x133",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x233",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x433",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x733",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = local cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4733",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = local cache or dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = remote cache or dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F03",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF03",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand data requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x103",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x203",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x403",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x703",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4703",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F01",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF01",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x101",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x201",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x401",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x701",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4701",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F04",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF04",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x104",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x204",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x404",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x704",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4704",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F02",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF02",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x102",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x202",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x402",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x702",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4702",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F80",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF80",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore other requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x180",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x280",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x480",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x780",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4780",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F30",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF30",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch data requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x130",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x230",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x430",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x730",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4730",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F10",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF10",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x110",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x210",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x410",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x710",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4710",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F40",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF40",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x140",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x240",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x440",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x740",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4740",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F20",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF20",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x120",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x220",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x420",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x720",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4720",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F70",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF70",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x170",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x270",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x470",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x770",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4770",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
new file mode 100644
index 000000000000..7d2f71a9dee3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
@@ -0,0 +1,229 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_ASSIST.ALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.INPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.OUTPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_COMP_OPS_EXE.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MMX Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP double precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE and SSE2 FP Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP packed Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP scalar Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP single precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE2 integer Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Computational floating-point operations executed"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "FP_MMX_TRANS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All Floating Point to and from MMX transitions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_MMX_TRANS.TO_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from MMX to Floating Point instructions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_MMX_TRANS.TO_MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from Floating Point to MMX instructions"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_128.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer pack operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_128.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer arithmetic operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_128.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer logical operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_128.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer multiply operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_128.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shift operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_128.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shuffle/move operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_128.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer unpack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_64.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit pack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_64.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit arithmetic operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_64.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit logical operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_64.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit packed multiply operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_64.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shift operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_64.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shuffle/move operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_64.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit unpack operations"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json b/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
new file mode 100644
index 000000000000..e5e21e03444d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
@@ -0,0 +1,26 @@
+[
+ {
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions decoded"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.FUSIONS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused instructions decoded"
+ },
+ {
+ "EventCode": "0x19",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TWO_UOP_INSTS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Two Uop instructions decoded"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/memory.json b/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
new file mode 100644
index 000000000000..f914a4525b65
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
@@ -0,0 +1,739 @@
+[
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x60FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF8FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x40FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x20FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any LLC miss",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the local DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/other.json b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
new file mode 100644
index 000000000000..af0860622445
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
@@ -0,0 +1,210 @@
+[
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Early Branch Prediciton Unit clears"
+ },
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Late Branch Prediction Unit clears"
+ },
+ {
+ "EventCode": "0xE5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch prediction unit missed call or return"
+ },
+ {
+ "EventCode": "0xD5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ES_REG_RENAMES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ES segment renames"
+ },
+ {
+ "EventCode": "0x6C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IO_TRANSACTIONS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "I/O transactions"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch stall cycles"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch hits"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch misses"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I Instruction fetches"
+ },
+ {
+ "EventCode": "0x82",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Large ITLB hit"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "LOAD_DISPATCH.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All loads dispatched"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "LOAD_DISPATCH.MOB",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_DISPATCH.RS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched that bypass the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_DISPATCH.RS_DELAYED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from stage 305"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PARTIAL_ADDRESS_ALIAS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "False dependencies due to partial address aliasing"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All RAT stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Flag stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Partial register stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ROB read port stalls cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Scoreboard stall cycles"
+ },
+ {
+ "EventCode": "0x4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "SB_DRAIN.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All Store buffer stall cycles"
+ },
+ {
+ "EventCode": "0xD4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SEG_RENAME_STALLS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Segment rename stall cycles"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SNOOP_RESPONSE.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HIT to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SNOOP_RESPONSE.HITE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITE to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SNOOP_RESPONSE.HITM",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITM to snoop"
+ },
+ {
+ "EventCode": "0xF6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SQ_FULL_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue full stall cycles"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json b/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
new file mode 100644
index 000000000000..41006ddcd893
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
@@ -0,0 +1,881 @@
+[
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.CYCLES_DIV_BUSY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles the divider is busy"
+ },
+ {
+ "EventCode": "0x14",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.DIV",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Divide Operations executed",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ARITH.MUL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Multiply operations executed"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BACLEAR.BAD_TARGET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEAR asserted with bad target address"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR.CLEAR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEAR asserted, regardless of cause "
+ },
+ {
+ "EventCode": "0xA7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR_FORCE_IQ",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction queue forced BACLEAR"
+ },
+ {
+ "EventCode": "0xE0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch instructions decoded"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_INST_EXEC.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_EXEC.COND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Conditional branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_EXEC.DIRECT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Unconditional branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Unconditional call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_INST_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_INST_EXEC.NON_CALLS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect return branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_EXEC.TAKEN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired conditional branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Retired near call instructions (Precise Event)"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_MISP_EXEC.ANY",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_EXEC.COND",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted conditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_EXEC.DIRECT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted unconditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_MISP_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_MISP_EXEC.NON_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_MISP_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted return branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_MISP_EXEC.TAKEN",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted near retired calls (Precise Event)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reference cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_P",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total CPU cycles",
+ "CounterMask": "2"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "ILD_STALL.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Any Instruction Length Decoder stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction Queue full stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Length Change Prefix stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ILD_STALL.MRU",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stall cycles due to BPU MRU bypass"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "ILD_STALL.REGEN",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Regen stall cycles"
+ },
+ {
+ "EventCode": "0x18",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_DECODED.DEC0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions that must be decoded by decoder 0"
+ },
+ {
+ "EventCode": "0x1E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles instructions are written to the instruction queue"
+ },
+ {
+ "EventCode": "0x17",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions written to instruction queue."
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (fixed counter)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (Programmable counter and Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "INST_RETIRED.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired MMX instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired floating-point operations (Precise Event)"
+ },
+ {
+ "EventCode": "0x4C",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Load operations conflicting with software prefetches"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xA8",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.INACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD_OVERFLOW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loops that can't stream from the instruction queue"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Cycles machine clear asserted"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEM_ORDER",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Self-Modifying Code detected"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "RESOURCE_STALLS.FPCW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "FPU control word write stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RESOURCE_STALLS.LOAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Load buffer stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "RESOURCE_STALLS.MXCSR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MXCSR rename stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "RESOURCE_STALLS.OTHER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Other Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ROB full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reservation Station full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.STORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Store buffer stall cycles"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)"
+ },
+ {
+ "EventCode": "0xDB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOP_UNFUSION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uop unfusions due to FP exceptions"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DECODED.ESP_FOLDING",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer instructions decoded"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DECODED.ESP_SYNC",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer sync operations"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops decoded by Microcode Sequencer",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xD1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DECODED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops are decoded",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on any port (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.PORT0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 0"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 0, 1 or 5"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.PORT1",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT2_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 2 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT234_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 2, 3 or 4"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT3_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 3 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT4_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 4 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED.PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 5"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued on any thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops were issued on either thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_ISSUED.FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Fused Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are being retired",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retirement slots used (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
new file mode 100644
index 000000000000..0596094e0ee9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
@@ -0,0 +1,109 @@
+[
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_LOAD_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load misses"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_LOAD_MISSES.PDE_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss caused by low part of address"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "DTLB second level hit"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss page walks complete"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB misses"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB first level misses but second level hit"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB miss page walks"
+ },
+ {
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_FLUSH",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ITLB flushes"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISS_RETIRED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss page walks"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss the DTLB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_STORE_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired stores that miss the DTLB (Precise Event)"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
new file mode 100644
index 000000000000..bef73c499f83
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
@@ -0,0 +1,1879 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with locked access.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load uops retired",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load uops.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of store uops retired.",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store uops.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired demand loads that missed the last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops.",
+ "EventCode": "0xD4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier. ",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D.ALLOCATED_IN_M",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Allocated L1D data cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1D.EVICTION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L1D.ALL_M_REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D miss oustandings duration in cycles.",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1D is locked.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests sent to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cacheable and noncachaeble code read requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand and prefetch data reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that miss cache lines.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in E state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that hit cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFOs that access cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_S",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANS.RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANS.CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache accesses when fetching instructions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANS.L1D_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L1D writebacks that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANS.L2_FILL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 fill requests that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Transactions accessing L2 pipe.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_IN.I",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in I state filling L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in S state filling L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines in E state filling L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines filling L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines evicted by demand.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.PF_CLEAN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa",
+ "EventName": "L2_LINES_OUT.DIRTY_ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Dirty L2 cache lines filling the L2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Split locks in SQ.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from L2 hardware prefetchers.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "2",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0240",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0240",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0240",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0240",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0240",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0120",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch RFOs that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0120",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0120",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0120",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0120",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c03f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "COREWB & ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data reads that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x18000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses. It also includes L2 hints sent to LLC to keep a line from being evicted out of the core caches.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x803c8000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2380408000",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) data reads that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3f803c0100",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the LLC.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003c0100",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10003c0100",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003c0100",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003c0100",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10400",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10800",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts non-temporal stores.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data reads .",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand rfo's .",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x000105B3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00010122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch prefetch RFOs .",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x000107F7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo references (demand & prefetch) .",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10433",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_M.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
new file mode 100644
index 000000000000..982eda48785e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
@@ -0,0 +1,138 @@
+[
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OTHER_ASSISTS.AVX_STORE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to Output values.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of SIMD FP assists due to input values.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1e",
+ "EventName": "FP_ASSIST.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
new file mode 100644
index 000000000000..1b7b1dd36c68
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
@@ -0,0 +1,305 @@
+[
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "IDQ.EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DSB_FILL.OTHER_CANCEL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x9C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering any Uop.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAC",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa",
+ "EventName": "DSB_FILL.ALL_CANCEL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3c",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
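
A quick note on how these JSON files are consumed: perf builds its event tables from them, so each EventName above can also be programmed directly from the raw EventCode/UMask fields in the same entry. A minimal sketch, assuming a checkout that contains frontend.json at the path added by this patch:

    # sketch: derive the raw perf event string for one front-end event from its JSON fields
    import json

    with open("tools/perf/pmu-events/arch/x86/sandybridge/frontend.json") as f:
        events = json.load(f)

    # pick the top-down front-end-bound event described above
    ev = next(e for e in events if e["EventName"] == "IDQ_UOPS_NOT_DELIVERED.CORE")
    raw = "cpu/event={},umask={},name={}/".format(
        ev["EventCode"].lower(), ev["UMask"].lower(), ev["EventName"])
    print(raw)  # e.g. cpu/event=0x9c,umask=0x1,name=IDQ_UOPS_NOT_DELIVERED.CORE/

Passing the printed string to perf stat -e should count undelivered front-end slots on a Sandy Bridge machine; once the pmu-events tables are compiled into perf, the lowercased EventName is expected to be usable as an alias as well.
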
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
new file mode 100644
index 000000000000..e6dfa89d00f3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
@@ -0,0 +1,445 @@
+[
+ {
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads with latency value being above 4 .",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Loads with latency value being above 8.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Loads with latency value being above 16.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Loads with latency value being above 32.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2003",
+ "BriefDescription": "Loads with latency value being above 64.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1009",
+ "BriefDescription": "Loads with latency value being above 128.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "503",
+ "BriefDescription": "Loads with latency value being above 256.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "101",
+ "BriefDescription": "Loads with latency value being above 512.",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xCD",
+ "Counter": "3",
+ "UMask": "0x2",
+ "EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
+ "PRECISE_STORE": "1",
+ "TakenAlone": "1",
+ "CounterHTOff": "3"
+ },
+ {
+ "EventCode": "0xBE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.LLC_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x05",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400091",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400240",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch code reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400090",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch data reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400120",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch RFOs that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3004003f7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand code reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data writes (RFOs) that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x300400100",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the LLC and the data returned from dram.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts all data requests (demand/prefetch data reads and demand data writes (RFOs) that miss the LLC where the data is returned from local DRAM",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6004001b3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts LLC replacements.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "This event counts any requests that miss the LLC where the data was returned from local DRAM",
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1f80408fff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_MISS_LOCAL.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x17004001b3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS_LOCAL.ANY_LLC_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1f80400004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_MISS_LOCAL.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1f80400010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_MISS_LOCAL.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1f80400040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_MISS_LOCAL.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1f80400080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.LLC_MISS_LOCAL.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1f80400200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Offcore": "1",
+ "EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.LLC_MISS_LOCAL.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": " REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
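
The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above are threshold events: MSRIndex 0x3F6 names the load-latency threshold MSR and MSRValue is the threshold in cycles (perf exposes this threshold as the ldlat event term). A minimal sketch that reads the thresholds back out of memory.json, assuming the path added by this patch:

    # sketch: list the load-latency threshold events and the cycle threshold each encodes
    import json

    with open("tools/perf/pmu-events/arch/x86/sandybridge/memory.json") as f:
        events = json.load(f)

    for ev in events:
        if ev["EventName"].startswith("MEM_TRANS_RETIRED.LOAD_LATENCY_GT_"):
            print("{:45s} threshold={:>4d} cycles  (MSR {})".format(
                ev["EventName"], int(ev["MSRValue"], 16), ev["MSRIndex"]))
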
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/other.json b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
new file mode 100644
index 000000000000..64b195b82c50
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
@@ -0,0 +1,58 @@
+[
+ {
+ "EventCode": "0x17",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Valid instructions written to IQ per cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPL_CYCLES.RING0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "CPL_CYCLES.RING0_TRANS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPL_CYCLES.RING123",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HW_PRE_REQ.DL1_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
new file mode 100644
index 000000000000..8a597e45ed84
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
@@ -0,0 +1,1220 @@
+[
+ {
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. ",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired from execution.",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. ",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken macro-conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x90",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc2",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc8",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xd0",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x84",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x90",
+ "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa0",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc1",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc4",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xd0",
+ "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted direct near calls.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stall cycles because IQ is full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Multiply packed/scalar single precision uops allocated.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RESOURCE_STALLS.LB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5B",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "RESOURCE_STALLS2.BOB_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x5E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count cases of saving new LBR.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of micro-ops retired.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Actually retired uops.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Far branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted not taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired instructions experiencing ITLB misses.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when divider is busy executing divide operations.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of the divide operations executed.",
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "ARITH.FPU_DIV",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Divide operations executed.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops dispatched per thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops dispatched from any thread.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "2",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "2",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "6",
+ "CounterHTOff": "2"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
+ "EventCode": "0x07",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x07",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "AGU_BYPASS_CANCEL.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "AnyThread": "1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
+ "TakenAlone": "1",
+ "CounterHTOff": "1"
+ },
+ {
+ "EventCode": "0x5B",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls2 control structures full for physical registers.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5B",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with either free list is empty.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "RESOURCE_STALLS.MEM_RS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "RESOURCE_STALLS.OOO_RSRC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5B",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "RESOURCE_STALLS2.OOO_RSRC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls out of order resources full.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xa",
+ "EventName": "RESOURCE_STALLS.LB_SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.",
+ "EventCode": "0x59",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EdgeDetect": "1",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xc3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
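The array above follows the layout consumed by perf's pmu-events build (jevents): each object names a hardware event through EventCode and UMask, adds scheduling constraints (Counter, CounterHTOff) and, optionally, modifiers such as CounterMask, Invert, EdgeDetect and AnyThread. As a rough sketch only - assuming the core PMU exposes the usual event/umask/cmask/inv/edge/any format attributes, which this patch does not itself define - one entry copied from the table above translates into a perf raw event string as follows:
# Sketch: map one pmu-events entry (copied from the table above) onto the
# conventional cpu PMU format terms; the term names are an assumption of
# this sketch, not something stated by the JSON file.
entry = {
    "EventCode": "0xC2",
    "Invert": "1",
    "UMask": "0x1",
    "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
    "CounterMask": "1",
}

def to_perf_string(e):
    terms = ["event=" + e["EventCode"].lower(), "umask=" + e["UMask"]]
    if e.get("CounterMask", "0") not in ("", "0"):
        terms.append("cmask=" + e["CounterMask"])
    if e.get("Invert") == "1":
        terms.append("inv")
    if e.get("EdgeDetect") == "1":
        terms.append("edge")
    if e.get("AnyThread") == "1":
        terms.append("any")
    return "cpu/" + ",".join(terms) + ",name=" + e["EventName"] + "/"

print(to_perf_string(entry))  # cpu/event=0xc2,umask=0x1,cmask=1,inv,name=UOPS_RETIRED.CORE_STALL_CYCLES/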
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
new file mode 100644
index 000000000000..a654ab771fce
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
@@ -0,0 +1,149 @@
+[
+ {
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "STLB flush attempts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
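The virtual-memory table above is mostly self-describing; the WALK_DURATION entries are the ones usually combined into a derived metric, since ITLB_MISSES.WALK_DURATION, DTLB_LOAD_MISSES.WALK_DURATION and DTLB_STORE_MISSES.WALK_DURATION each count cycles the Page Miss Handler spends walking. Their sum over total unhalted cycles approximates the fraction of time lost to page walks - a common convention, not something this file defines. A small illustrative sketch with made-up counts:
def page_walk_cycle_fraction(itlb_walk, dtlb_load_walk, dtlb_store_walk, cycles):
    # Sum of the three *_MISSES.WALK_DURATION counts relative to total
    # unhalted cycles; all inputs are raw event counts from the same run.
    return (itlb_walk + dtlb_load_walk + dtlb_store_walk) / float(cycles)

# Made-up sample counts, purely to show the arithmetic.
print("%.1f%% of cycles spent in page walks" %
      (100.0 * page_walk_cycle_fraction(1.2e6, 3.4e6, 0.9e6, 2.5e9)))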
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
new file mode 100644
index 000000000000..0bd1bc5302de
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
@@ -0,0 +1,811 @@
+[
+ {
+ "PublicDescription": "This event counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBS (L2 misses) and WOB (L2 write-back victims).",
+ "EventCode": "0x30",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "L2_REJECT_XQ.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of request from the L2 that were not accepted into the XQ"
+ },
+ {
+ "PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2Q due to a full or nearly full w condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core?s dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event.)",
+ "EventCode": "0x31",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "CORE_REJECT_L2Q.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of request that were not accepted into the L2Q because the L2Q is FULL."
+ },
+ {
+ "PublicDescription": "This event counts requests originating from the core that references a cache line in the L2 cache.",
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache requests from this core"
+ },
+ {
+ "PublicDescription": "This event counts the total number of L2 cache references and the number of L2 cache misses respectively.",
+ "EventCode": "0x2E",
+ "Counter": "0,1",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache request misses"
+ },
+ {
+ "EventCode": "0x86",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of cycles the NIP stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses."
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retired loads that were prohibited from receiving forwarded data from the store because of address mismatch.",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "REHABQ.LD_BLOCK_ST_FORWARD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked due to store forward restriction"
+ },
+ {
+ "PublicDescription": "This event counts the cases where a forward was technically possible, but did not occur because the store data was not available at the right time.",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "REHABQ.LD_BLOCK_STD_NOTREADY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads blocked due to store data not ready"
+ },
+ {
+ "PublicDescription": "This event counts the number of retire stores that experienced cache line boundary splits.",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "REHABQ.ST_SPLITS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Store uops that split cache line boundary"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retire loads that experienced cache line boundary splits.",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "REHABQ.LD_SPLITS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Load uops that split cache line boundary"
+ },
+ {
+ "PublicDescription": "This event counts the number of retired memory operations with lock semantics. These are either implicit locked instructions such as the XCHG instruction or instructions with an explicit LOCK prefix (0xF0).",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "REHABQ.LOCK",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Uops with lock semantics"
+ },
+ {
+ "PublicDescription": "This event counts the number of retired stores that are delayed because there is not a store address buffer available.",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "REHABQ.STA_FULL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Store address buffer full"
+ },
+ {
+ "PublicDescription": "This event counts the number of load uops reissued from Rehabq.",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "REHABQ.ANY_LD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Any reissued load uops"
+ },
+ {
+ "PublicDescription": "This event counts the number of store uops reissued from Rehabq.",
+ "EventCode": "0x03",
+ "Counter": "0,1",
+ "UMask": "0x80",
+ "EventName": "REHABQ.ANY_ST",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Any reissued store uops"
+ },
+ {
+ "PublicDescription": "This event counts the number of load ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MEM_UOPS_RETIRED.L1_MISS_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads missed L1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load ops retired that hit in the L2.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "MEM_UOPS_RETIRED.L2_HIT_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads hit L2"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load ops retired that miss in the L2.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "MEM_UOPS_RETIRED.L2_MISS_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Loads missed L2"
+ },
+ {
+ "PublicDescription": "This event counts the number of load ops retired that had UTLB miss.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "MEM_UOPS_RETIRED.UTLB_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads missed UTLB"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load ops retired that got data from the other core or from the other module.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "MEM_UOPS_RETIRED.HITM",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cross core or cross module hitm"
+ },
+ {
+ "PublicDescription": "This event counts the number of load ops retired.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x40",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All Loads"
+ },
+ {
+ "PublicDescription": "This event counts the number of store ops retired.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x80",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All Stores"
+ },
+ {
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventCode": "0xB7",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000044",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any code reads (demand & prefetch) that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000044",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any code reads (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000044",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any code reads (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000044",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any code reads (demand & prefetch) that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010044",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any code reads (demand & prefetch) that have any response type.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000022",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000022",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000022",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000022",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010022",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that have any response type.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680003091",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data read (demand & prefetch) that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000003091",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data read (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400003091",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data read (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200003091",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data read (demand & prefetch) that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000013091",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any data read (demand & prefetch) that have any response type.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680004800",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts streaming store that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000008008",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400008008",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200008008",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000018008",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts any request that have any response type.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680002000",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts DCU hardware prefetcher data read that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000002000",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts DCU hardware prefetcher data read that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400002000",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts DCU hardware prefetcher data read that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200002000",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts DCU hardware prefetcher data read that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000012000",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts DCU hardware prefetcher data read that have any response type.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000100",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Countsof demand RFO requests to write to partial cache lines that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000080",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand reads of partial cache lines (including UC and WC) that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000040",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000040",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000040",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000020",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000020",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts RFO requests generated by L2 prefetchers that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000020",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000020",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000010",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000010",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000010",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000010",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000008",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts writeback (modified to exclusive) that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0080000008",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts writeback (modified to exclusive) that miss L2 with no details on snoop-related information.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000004",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000004",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000004",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000004",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010004",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that have any response type.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000002",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000002",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000002",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000002",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000002",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4000000001",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch data read that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1680000001",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch data read that miss L2.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1000000001",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch data read that hit in the other module where modified copies were found in other core's L1 cache.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0400000001",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch data read that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0200000001",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch data read that miss L2 with a snoop miss response.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x0000010001",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts demand and DCU prefetch data read that have any response type.",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
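Unlike the plain entries, the OFFCORE_RESPONSE.* objects above carry an MSRValue/MSRIndex pair in addition to EventCode 0xB7 and UMask 0x1: the request/response attributes are programmed into the dedicated offcore response MSRs (0x1a6/0x1a7) rather than into the event select itself. Once the table is compiled in, perf can resolve the symbolic name directly; the sketch below only illustrates how such an entry could be written out by hand, assuming the usual offcore_rsp format attribute of Intel core PMUs (an assumption of the sketch, not stated by this file):
def offcore_event_string(e):
    # Pass the MSRValue through the offcore_rsp term alongside the normal
    # event/umask pair; 'name=' just keeps the symbolic name in perf output.
    return "cpu/event=%s,umask=%s,offcore_rsp=%s,name=%s/" % (
        e["EventCode"].lower(), e["UMask"], e["MSRValue"], e["EventName"])

entry = {  # copied from the cache table above
    "EventCode": "0xB7",
    "UMask": "0x1",
    "MSRValue": "0x1680000044",
    "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.ANY",
}
print(offcore_event_string(entry))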
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/frontend.json b/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
new file mode 100644
index 000000000000..204473badf5a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
@@ -0,0 +1,47 @@
+[
+ {
+ "PublicDescription": "This event counts all instruction fetches, not including most uncacheable\r\nfetches.",
+ "EventCode": "0x80",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetches"
+ },
+ {
+ "PublicDescription": "This event counts all instruction fetches from the instruction cache.",
+ "EventCode": "0x80",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetches from Icache"
+ },
+ {
+ "PublicDescription": "This event counts all instruction fetches that miss the Instruction cache or produce memory requests. This includes uncacheable fetches. An instruction fetch miss is counted only once and not once for every cycle it is outstanding.",
+ "EventCode": "0x80",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Icache miss"
+ },
+ {
+ "PublicDescription": "Counts the number of times the MSROM starts a flow of UOPS. It does not count every time a UOP is read from the microcode ROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort. The event will count MSROM startups for UOPS that are speculative, and subsequently cleared by branch mispredict or machine clear. Background: UOPS are produced by two mechanisms. Either they are generated by hardware that decodes instructions into UOPS, or they are delivered by a ROM (called the MSROM) that holds UOPS associated with a specific instruction. MSROM UOPS might also be delivered in response to some condition such as a fault or other exceptional condition. This event is an excellent mechanism for detecting instructions that require the use of MSROM instructions.",
+ "EventCode": "0xE7",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MS_DECODED.MS_ENTRY",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of times entered into a ucode flow in the FEC. Includes inserted flows due to front-end detected faults or assists. Speculative count."
+ },
+ {
+ "PublicDescription": "Counts the number of times a decode restriction reduced the decode throughput due to wrong instruction length prediction.",
+ "EventCode": "0xE9",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of times a decode restriction reduced the decode throughput due to wrong instruction length prediction"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/memory.json b/tools/perf/pmu-events/arch/x86/silvermont/memory.json
new file mode 100644
index 000000000000..d72e09a5f929
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/silvermont/memory.json
@@ -0,0 +1,11 @@
+[
+ {
+ "PublicDescription": "This event counts the number of times that pipeline was cleared due to memory ordering issues.",
+ "EventCode": "0xC3",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Stalls due to Memory ordering"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
new file mode 100644
index 000000000000..7468af99190a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
@@ -0,0 +1,359 @@
+[
+ {
+ "PEBS": "1",
+ "PublicDescription": "ALL_BRANCHES counts the number of any branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of branch instructions retired..."
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "JCC counts the number of conditional branch (JCC) instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x7e",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of JCC branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "TAKEN_JCC counts the number of taken conditional branch (JCC) instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xfe",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of taken JCC branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "CALL counts the number of near CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xf9",
+ "EventName": "BR_INST_RETIRED.CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of near CALL branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "REL_CALL counts the number of near relative CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xfd",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "IND_CALL counts the number of near indirect CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xfb",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "RETURN counts the number of near RET branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xf7",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of near RET branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "NON_RETURN_IND counts the number of near indirect JMP and near indirect CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xeb",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "FAR counts the number of far branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0xbf",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of far branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "ALL_BRANCHES counts the number of any mispredicted branch instructions retired. This umask is an architecturally defined event. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "JCC counts the number of mispredicted conditional branches (JCC) instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0x7e",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted JCC branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "TAKEN_JCC counts the number of mispredicted taken conditional branch (JCC) instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xfe",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted taken JCC branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "IND_CALL counts the number of mispredicted near indirect CALL branch instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xfb",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "RETURN counts the number of mispredicted near RET branch instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xf7",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "NON_RETURN_IND counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "EventCode": "0xC5",
+ "Counter": "0,1",
+ "UMask": "0xeb",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired"
+ },
+ {
+ "PublicDescription": "This event counts the number of micro-ops retired that were supplied from MSROM.",
+ "EventCode": "0xC2",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.MS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "MSROM micro-ops retired"
+ },
+ {
+ "PublicDescription": "This event counts the number of micro-ops retired. The processor decodes complex macro instructions into a sequence of simpler micro-ops. Most instructions are composed of one or two micro-ops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. In some cases micro-op sequences are fused or whole instructions are fused into one micro-op. See other UOPS_RETIRED events for differentiating retired fused and non-fused micro-ops.",
+ "EventCode": "0xC2",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "UOPS_RETIRED.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Micro-ops retired"
+ },
+ {
+        "PublicDescription": "This event counts the number of times that a program writes to a code section. Self-modifying code causes a severe penalty in all Intel architecture processors.",
+ "EventCode": "0xC3",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Self-Modifying Code detected"
+ },
+ {
+        "PublicDescription": "This event counts the number of times that the pipeline stalled due to FP operations needing assists.",
+ "EventCode": "0xC3",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Stalls due to FP assists"
+ },
+ {
+ "PublicDescription": "Machine clears happen when something happens in the machine that causes the hardware to need to take special care to get the right answer. When such a condition is signaled on an instruction, the front end of the machine is notified that it must restart, so no more instructions will be decoded from the current path. All instructions \"older\" than this one will be allowed to finish. This instruction and all \"younger\" instructions must be cleared, since they must not be allowed to complete. Essentially, the hardware waits until the problematic instruction is the oldest instruction in the machine. This means all older instructions are retired, and all pending stores (from older instructions) are completed. Then the new path of instructions from the front end are allowed to start into the machine. There are many conditions that might cause a machine clear (including the receipt of an interrupt, or a trap or a fault). All those conditions (including but not limited to MACHINE_CLEARS.MEMORY_ORDERING, MACHINE_CLEARS.SMC, and MACHINE_CLEARS.FP_ASSIST) are captured in the ANY event. In addition, some conditions can be specifically counted (i.e. SMC, MEMORY_ORDERING, FP_ASSIST). However, the sum of SMC, MEMORY_ORDERING, and FP_ASSIST machine clears will not necessarily equal the number of ANY.",
+ "EventCode": "0xC3",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "MACHINE_CLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts all machine clears"
+ },
+ {
+ "PublicDescription": "Counts the number of cycles when no uops are allocated and the ROB is full (less than 2 entries available).",
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "NO_ALLOC_CYCLES.ROB_FULL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of cycles when no uops are allocated and the ROB is full (less than 2 entries available)"
+ },
+ {
+        "PublicDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted branch is retired.",
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
+ "SampleAfterValue": "200003",
+        "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted branch is retired."
+ },
+ {
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x20",
+ "EventName": "NO_ALLOC_CYCLES.RAT_STALL",
+ "SampleAfterValue": "200003",
+        "BriefDescription": "Counts the number of cycles when no uops are allocated and a RAT stall is asserted."
+ },
+ {
+        "PublicDescription": "The NO_ALLOC_CYCLES.NOT_DELIVERED event is used to measure front-end inefficiencies, i.e. when the front-end of the machine is not delivering micro-ops to the back-end and the back-end is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into micro-ops (uops) in machine understandable format and putting them into a micro-op queue to be consumed by back end. The back-end then takes these micro-ops, allocates the required resources. When all resources are ready, micro-ops are executed. If the back-end is not ready to accept micro-ops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls, eventually forcing the front-end to wait until the back-end is ready to receive more UOPS. This event counts the cycles only when back-end is requesting more uops and front-end is not able to provide them. Some examples of conditions that cause front-end inefficiencies are: Icache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth.",
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x50",
+ "EventName": "NO_ALLOC_CYCLES.NOT_DELIVERED",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of cycles when no uops are allocated, the IQ is empty, and no other condition is blocking allocation."
+ },
+ {
+        "PublicDescription": "The NO_ALLOC_CYCLES.ALL event counts the number of cycles when the front-end does not provide any instructions to be allocated for any reason. This event indicates the cycles where an allocation stall occurs and no UOPS are allocated in that cycle.",
+ "EventCode": "0xCA",
+ "Counter": "0,1",
+ "UMask": "0x3f",
+ "EventName": "NO_ALLOC_CYCLES.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of cycles when no uops are allocated for any reason."
+ },
+ {
+        "PublicDescription": "Counts the number of cycles the allocation pipeline is stalled waiting for a free MEC reservation station entry. The cycles should be appropriately counted in case of cracked ops, e.g. in case of a cracked load-op, the load portion is sent to the MEC.",
+ "EventCode": "0xCB",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "RS_FULL_STALL.MEC",
+ "SampleAfterValue": "200003",
+        "BriefDescription": "Counts the number of cycles the allocation pipeline is stalled waiting for a free MEC reservation station entry. The cycles should be appropriately counted in case of cracked ops, e.g. in case of a cracked load-op, the load portion is sent to the MEC"
+ },
+ {
+ "EventCode": "0xCB",
+ "Counter": "0,1",
+ "UMask": "0x1f",
+ "EventName": "RS_FULL_STALL.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of cycles the Alloc pipeline is stalled when any one of the RSs (IEC, FPC and MEC) is full. This event is a superset of all the individual RS stall event counts."
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions that retire execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers.",
+ "EventCode": "0xC0",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired"
+ },
+ {
+        "PublicDescription": "Cycles the divider is busy. This event counts the cycles when the divide unit is unable to accept a new divide UOP because it is busy processing a previously dispatched UOP. The cycles will be counted irrespective of whether or not another divide UOP is waiting to enter the divide unit (from the RS). This event might count cycles while a divide is in progress even if the RS is empty. The divide instruction is one of the longest latency instructions in the machine. Hence, it has a special event associated with it to help determine if divides are delaying the retirement of instructions.",
+ "EventCode": "0xCD",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles the divider is busy. Does not imply a stall waiting for the divider."
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps. Background: Modern microprocessors employ extensive pipelining and speculative techniques. Since sometimes an instruction is started but never completed, the notion of \"retirement\" is introduced. A retired instruction is one that commits its states. Or stated differently, an instruction might be abandoned at some point. No instruction is truly finished until it retires. This counter measures the number of completed instructions. The fixed event is INST_RETIRED.ANY and the programmable event is INST_RETIRED.ANY_P.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired"
+ },
+ {
+        "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. In systems with a constant core frequency, this event can give you a measurement of the elapsed time while the core was not in halt state by dividing the event count by the core frequency. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTED.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but count as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles"
+ },
+ {
+        "PublicDescription": "Counts the number of reference cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTED.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but count as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles"
+ },
+ {
+ "PublicDescription": "This event counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time.",
+ "EventCode": "0x3C",
+ "Counter": "0,1",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when core is not halted"
+ },
+ {
+        "PublicDescription": "This event counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time.",
+ "EventCode": "0x3C",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when core is not halted"
+ },
+ {
+ "PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.ANY event counts the number of baclears for any type of branch.",
+ "EventCode": "0xE6",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "BACLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of baclears"
+ },
+ {
+ "PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.RETURN event counts the number of RETURN baclears.",
+ "EventCode": "0xE6",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "BACLEARS.RETURN",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of RETURN baclears"
+ },
+ {
+        "PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.COND event counts the number of JCC (Jump on Conditional Code) baclears.",
+ "EventCode": "0xE6",
+ "Counter": "0,1",
+ "UMask": "0x10",
+ "EventName": "BACLEARS.COND",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of JCC baclears"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "ALL_TAKEN_BRANCHES counts the number of all taken branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "EventCode": "0xC4",
+ "Counter": "0,1",
+ "UMask": "0x80",
+ "PEBScounters": "0,1",
+ "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of taken branch instructions retired"
+ }
+] \ No newline at end of file
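
As a usage note (not part of the patch itself): perf turns each entry above into a raw hardware event, and on x86 the raw config is built from the JSON fields as EventCode | (UMask << 8), so any of these counters can also be programmed directly through perf_event_open(2), roughly the equivalent of perf stat -e r00c5 for the example below. A minimal C sketch, assuming a Silvermont part and a perf_event_paranoid setting that allows counting; the helper name perf_open_raw() and the choice of BR_MISP_RETIRED.ALL_BRANCHES are illustrative only:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    static int perf_open_raw(uint64_t config)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;      /* config = EventCode | UMask << 8 */
            attr.config = config;
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            /* count for the calling thread on any CPU */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }

    int main(void)
    {
            /* BR_MISP_RETIRED.ALL_BRANCHES: EventCode 0xC5, UMask 0x00 */
            int fd = perf_open_raw(0xc5 | (0x00 << 8));
            uint64_t mispredicts = 0;

            if (fd < 0)
                    return 1;
            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload under test ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
            read(fd, &mispredicts, sizeof(mispredicts));
            printf("mispredicted branches retired: %llu\n",
                   (unsigned long long)mispredicts);
            close(fd);
            return 0;
    }
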
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
new file mode 100644
index 000000000000..ad31479f8f60
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
@@ -0,0 +1,69 @@
+[
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load ops retired that had DTLB miss.",
+ "EventCode": "0x04",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Loads missed DTLB"
+ },
+ {
+ "PublicDescription": "This event counts when a data (D) page walk is completed or started. Since a page walk implies a TLB miss, the number of TLB misses can be counted by counting the number of pagewalks.",
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_WALKS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "D-side page-walks",
+ "EdgeDetect": "1"
+ },
+ {
+ "PublicDescription": "This event counts every cycle when a D-side (walks due to a load) page walk is in progress. Page walk duration divided by number of page walks is the average duration of page-walks.",
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of D-side page-walks in core cycles"
+ },
+ {
+ "PublicDescription": "This event counts when an instruction (I) page walk is completed or started. Since a page walk implies a TLB miss, the number of TLB misses can be counted by counting the number of pagewalks.",
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_WALKS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "I-side page-walks",
+ "EdgeDetect": "1"
+ },
+ {
+        "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress. Page walk duration divided by number of page walks is the average duration of page-walks.",
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Duration of I-side page-walks in core cycles"
+ },
+ {
+ "PublicDescription": "This event counts when a data (D) page walk or an instruction (I) page walk is completed or started. Since a page walk implies a TLB miss, the number of TLB misses can be counted by counting the number of pagewalks.",
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.WALKS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Total page walks that are completed (I-side and D-side)",
+ "EdgeDetect": "1"
+ },
+ {
+ "PublicDescription": "This event counts every cycle when a data (D) page walk or instruction (I) page walk is in progress. Since a pagewalk implies a TLB miss, the approximate cost of a TLB miss can be determined from this event.",
+ "EventCode": "0x05",
+ "Counter": "0,1",
+ "UMask": "0x3",
+ "EventName": "PAGE_WALKS.CYCLES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Total cycles for all the page walks. (I-side and D-side)"
+ }
+] \ No newline at end of file
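
The descriptions above already state the intended use of these counters: PAGE_WALKS.D_SIDE_CYCLES divided by PAGE_WALKS.D_SIDE_WALKS gives the average D-side page-walk duration in core cycles. A rough C sketch of that calculation follows (an illustration under the same assumptions as the earlier sketch, not part of the patch); the raw configs again follow the PERFEVTSEL layout, with the edge-detect bit for the edge-counted WALKS flavour at bit 18, and the helper open_raw() is a stand-in:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    static int open_raw(uint64_t config)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            attr.config = config;
            attr.disabled = 1;
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }

    int main(void)
    {
            int cyc = open_raw(0x05 | (0x1 << 8));             /* PAGE_WALKS.D_SIDE_CYCLES */
            int wlk = open_raw(0x05 | (0x1 << 8) | (1 << 18)); /* PAGE_WALKS.D_SIDE_WALKS (edge) */
            uint64_t cycles = 0, walks = 0;

            if (cyc < 0 || wlk < 0)
                    return 1;
            ioctl(cyc, PERF_EVENT_IOC_ENABLE, 0);
            ioctl(wlk, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload with a working set large enough to miss the DTLB ... */
            ioctl(cyc, PERF_EVENT_IOC_DISABLE, 0);
            ioctl(wlk, PERF_EVENT_IOC_DISABLE, 0);
            read(cyc, &cycles, sizeof(cycles));
            read(wlk, &walks, sizeof(walks));
            if (walks)
                    printf("avg D-side page walk: %.1f core cycles\n",
                           (double)cycles / walks);
            return 0;
    }
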
diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json
new file mode 100644
index 000000000000..0551a9ba865d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json
@@ -0,0 +1,4299 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x11",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x12",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions with locked access.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x81",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired load instructions.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x82",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "All retired store instructions.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1",
+ "L1_Hit_Indication": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L1 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L1 cache as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xD4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "CounterHTOff": "0,1,2,3",
+ "Data_LA": "1"
+ },
+ {
+ "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "EventCode": "0x51",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D.REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "L1D data line replacements",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand\n from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "L1D miss outstanding duration in cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+        "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is, FB unavailability was the dominant reason for blocking the request. A request includes cacheable/uncacheable demands, that is, load, store or SW prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "This event counts both cacheable and noncacheable code read requests.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "SampleAfterValue": "100003",
+        "BriefDescription": "Cacheable and noncacheable code read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulting from any request type.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand and prefetch data reads",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "This event counts memory transactions that reached the super queue, including requests initiated by the core, all L3 prefetches, page walks, and so on.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when the L1D writeback pending FIFO is full.\nNote: Writeback pending FIFO has six entries.",
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "Errata": "SKL057",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "Errata": "SKL057",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cache line split locks sent to the uncore.",
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of cache line split locks sent to uncore.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventCode": "0xB7, 0xBB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100003",
+        "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+        "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only non-rejected loads are counted.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x21",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only non-rejected loads are counted.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+        "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non-rejected loads are counted.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe1",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand Data Read requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe2",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the total number of L2 code requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe4",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 code requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf8",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x38",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xd8",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFO requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x42",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RFO requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x22",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x44",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "L2 cache misses when fetching instructions.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x27",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand requests to L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe7",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Demand requests to L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "All requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All requests that miss L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "All L2 requests.",
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "All L2 requests",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache.",
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.USELESS_PREF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "L2_LINES_IN.ALL",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "L2 cache lines filling L2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0408000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000408000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400408000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200408000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100408000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080408000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040408000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc01c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that hit in the L3 and the snoops sent to sibling cores return clean response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0108000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_S & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000108000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_S & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400108000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_S & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200108000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_S & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100108000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080108000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040108000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_S & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0088000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_E & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000088000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_E & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400088000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_E & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200088000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_E & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100088000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080088000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040088000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_E & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0048000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_M & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000048000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_M & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400048000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_M & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200048000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_M & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100048000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080048000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040048000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_M & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000018000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts any other requests that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0400800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc01c0800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001c0800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001c0800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts streaming stores that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001c0800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts streaming stores that hit in the L3 and the snoops sent to sibling cores return clean response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001c0800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts streaming stores that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801c0800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401c0800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0100800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_S & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_S & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0080800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_E & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_E & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0040800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_M & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_M & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0020800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts streaming stores that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0400100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc01c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoops sent to sibling cores return clean response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0100100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_S & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_S & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0080100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_E & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_E & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_M & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_M & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0400080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc01c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0100080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0080080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0400004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc01c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0100004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0080004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0040004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand code reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0400002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc01c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoops sent to sibling cores return clean response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0100002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_S & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_S & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0080002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_E & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_E & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0040002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_M & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_M & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0020002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0400001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000400001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400400001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200400001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100400001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080400001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040400001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc01c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10001c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x04001c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x02001c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the snoops sent to sibling cores return clean response.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x01001c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00801c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00401c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0100001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000100001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400100001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200100001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100100001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080100001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040100001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0080001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000080001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400080001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200080001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100080001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080080001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040080001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0040001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000040001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400040001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200040001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100040001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080040001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040040001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc0020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1000020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0400020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0200020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0100020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0080020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0040020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0000010001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts demand data reads that have any response type.",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
new file mode 100644
index 000000000000..3c6b59af5d54
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
@@ -0,0 +1,68 @@
+[
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "EventCode": "0xCA",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1e",
+ "EventName": "FP_ASSIST.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
new file mode 100644
index 000000000000..e697dbd63e6e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -0,0 +1,472 @@
+[
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x83",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x83",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x83",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "SampleAfterValue": "200003",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "IDQ.MITE_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "IDQ.DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x18",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x24",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding ?4 ? x? when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread\n\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions)\n \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0?2 cycles.",
+ "EventCode": "0xAB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x15",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x400206",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x200206",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x400406",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EdgeDetect": "1",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x400806",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x401006",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x402006",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x404006",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x408006",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x410006",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x420006",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x100206",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC6",
+ "MSRValue": "0x300206",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
+ "MSRIndex": "0x3F7",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json
new file mode 100644
index 000000000000..d7fd5b06825b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json
@@ -0,0 +1,2309 @@
+[
+ {
+ "PublicDescription": "Unfriendly TSX abort triggered by a flowmarker.",
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "TX_EXEC.MISC2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "TX_EXEC.MISC3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "RTM region detected inside HLE.",
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "TX_EXEC.MISC4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "EventCode": "0x5d",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "HLE_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution started.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times HLE commit succeeded.",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution successfully committed",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of times HLE abort was triggered.",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "HLE_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.). ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "EventCode": "0xC9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RTM_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution started.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "EventCode": "0xC9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "Number of times RTM abort was triggered.",
+ "EventCode": "0xC9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "EventCode": "0xC9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RTM_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "EventCode": "0xC9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "EventCode": "0xC9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "EventCode": "0xC9",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "SKL089",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50021",
+ "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20011",
+ "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x40",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2003",
+ "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1009",
+ "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x100",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "503",
+ "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "EventCode": "0xCD",
+ "MSRValue": "0x200",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "101",
+ "BriefDescription": "Counts loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "TakenAlone": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "TX_MEM.ABORT_CAPACITY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "EventCode": "0x54",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x6",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "CounterMask": "6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3ffc008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203c008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103c008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007c008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc4008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044008000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_MISS_LOCAL_DRAM & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000408000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001c8000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000108000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_S & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000088000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_E & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000048000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & L3_HIT_M & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000028000 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "OTHER & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3ffc000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203c000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103c000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007c000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc4000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_MISS_LOCAL_DRAM & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001c0800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_S & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_E & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & L3_HIT_M & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020800 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "STREAMING_STORES & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3ffc000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203c000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103c000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007c000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc4000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_MISS_LOCAL_DRAM & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001c0100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_S & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_E & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & L3_HIT_M & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020100 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3ffc000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203c000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103c000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007c000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc4000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001c0080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_S & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_E & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & L3_HIT_M & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020080 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "PF_L3_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3ffc000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203c000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103c000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007c000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc4000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001c0004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_S & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_E & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & L3_HIT_M & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020004 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_CODE_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3ffc000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203c000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103c000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007c000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc4000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_MISS_LOCAL_DRAM & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001c0002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_S & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_E & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & L3_HIT_M & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020002 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_RFO & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3ffc000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203c000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103c000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x043c000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x023c000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x013c000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x00bc000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x007c000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3fc4000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HITM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0404000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_HIT_NO_FWD",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0204000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0104000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NOT_NEEDED",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0084000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x0044000001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SPL_HIT",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000400001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L4_HIT_LOCAL_L4 & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20001c0001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000100001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_S & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000080001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_E & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000040001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & L3_HIT_M & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2000020001 ",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM",
+ "Offcore": "1",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
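For orientation, the MSRValue in each offcore-response entry above is the raw value programmed into MSR 0x1a6/0x1a7, and its request, supplier, and snoop components can be read straight off the MSRValue/EventName pairs in this table. The following is a minimal, illustrative sketch (bit positions inferred from the entries above, not an authoritative layout) that decodes such a value:

    # decode_offcore.py - illustrative only; bit positions inferred from the
    # MSRValue <-> EventName pairs in the JSON above.
    REQUEST_BITS = {            # low bits: request type
        0x001: "DEMAND_DATA_RD",
        0x002: "DEMAND_RFO",
        0x004: "DEMAND_CODE_RD",
        0x080: "PF_L3_DATA_RD",
        0x100: "PF_L3_RFO",
    }
    SUPPLIER_BITS = {           # middle bits: where the line was supplied from
        0x00020000: "SUPPLIER_NONE",
        0x001c0000: "L3_HIT",             # L3_HIT_M | L3_HIT_E | L3_HIT_S
        0x00400000: "L4_HIT_LOCAL_L4",
        0x04000000: "L3_MISS_LOCAL_DRAM",
        0x3c000000: "L3_MISS",
    }
    SNOOP_BITS = {              # high bits: snoop response filter
        0x0080000000: "SNOOP_NONE",
        0x0100000000: "SNOOP_NOT_NEEDED",
        0x0200000000: "SNOOP_MISS",
        0x0400000000: "SNOOP_HIT_NO_FWD",
        0x1000000000: "SNOOP_HITM",
        0x2000000000: "SNOOP_NON_DRAM",
    }

    def decode(msr_value):
        """Return the matching REQUEST / SUPPLIER / SNOOP names for an MSRValue."""
        def match(table):
            return "|".join(name for mask, name in table.items() if msr_value & mask)
        return " & ".join(filter(None, (match(REQUEST_BITS),
                                        match(SUPPLIER_BITS),
                                        match(SNOOP_BITS))))

    # 0x2000020001 is listed above as DEMAND_DATA_RD & SUPPLIER_NONE & SNOOP_NON_DRAM
    print(decode(0x2000020001))

Run against the single-bit entries above, the output matches the EventName components; composite masks such as ANY_SNOOP (0x3fc0000000) simply decode to the union of the individual snoop names.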
diff --git a/tools/perf/pmu-events/arch/x86/skylake/other.json b/tools/perf/pmu-events/arch/x86/skylake/other.json
new file mode 100644
index 000000000000..cfdc323acc82
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/other.json
@@ -0,0 +1,12 @@
+[
+ {
+ "PublicDescription": "This event counts the number of hardware interruptions received by the processor.",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of hardware interrupts received by the processor.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
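As a side note, once these JSON files are wired into perf's pmu-events, the symbolic names above become usable directly; until then the EventCode/UMask pairs can be turned into a raw event by hand. A minimal sketch, assuming the standard x86 PERFEVTSEL layout (event select in bits 0-7, umask in bits 8-15):

    # raw_config.py - illustrative only; assumes the standard x86 event encoding.
    def raw_config(event_code, umask):
        """Combine EventCode and UMask into a raw perf config value."""
        return (umask << 8) | event_code

    # HW_INTERRUPTS.RECEIVED above: EventCode 0xCB, UMask 0x1
    assert raw_config(0xCB, 0x1) == 0x01CB
    # equivalent to: perf stat -e cpu/event=0xcb,umask=0x1/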
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
new file mode 100644
index 000000000000..0f7adb809be3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -0,0 +1,939 @@
+[
+ {
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Instructions retired from execution.",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x3",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "CounterHTOff": "Fixed counter 3"
+ },
+ {
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Resource-related stall cycles",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.SB",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "EventCode": "0x0E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "EventCode": "0x5E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of machine clears (nukes) of any type.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EdgeDetect": "1",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of machine clears (nukes) of any type. ",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "SKL091, SKL044",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
+ "EventCode": "0xC0",
+ "Counter": "1",
+ "UMask": "0x1",
+ "Errata": "SKL091, SKL044",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "CounterHTOff": "1"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts the number of retirement slots used.",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Retirement slots used.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts cycles without actually retired uops.",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts both direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts return instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Return instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Not taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Taken branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts far branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Far branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "Errata": "SKL091",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All (macro) branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "SampleAfterValue": "400009",
+ "BriefDescription": "Mispredicted macro branch instructions retired. ",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Number of uops executed from any thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of uops executed on the core.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_EXECUTED.X87",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "EventCode": "0xA1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
+ "EventCode": "0x4C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap)\n\n - store forwarding is impossible due to u-arch limitations\n\n - preceding lock RMW operations are not forwarded\n\n - store has the no-forward bit set (uncacheable/page-split/masked stores)\n\n - all-blocking stores are used (mostly, fences and port I/O)\n\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "EventCode": "0x07",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x5",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x14",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PEBS": "2",
+ "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,2,3",
+ "UMask": "0x1",
+ "Errata": "SKL091, SKL044",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
+ "CounterMask": "10",
+ "CounterHTOff": "0,2,3"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "EventName": "OTHER_ASSISTS.ANY",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register.\r\nFor more information, refer to ?Mixing Intel AVX and Intel SSE Code? section of the Optimization Guide.",
+ "EventCode": "0x0E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x00",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x2",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EdgeDetect": "1",
+ "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "2503",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
new file mode 100644
index 000000000000..02f32cbf6789
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
@@ -0,0 +1,272 @@
+[
+ {
+ "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
+ "EventCode": "0xBD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "SampleAfterValue": "100007",
+ "BriefDescription": "STLB flush attempts",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake. ",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake. ",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
new file mode 100644
index 000000000000..6e61ae20d01a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json
@@ -0,0 +1,2817 @@
+[
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "CACHE_LOCK_CYCLES.L1D",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D locked"
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CACHE_LOCK_CYCLES.L1D_L2",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D and L2 locked"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D.M_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines replaced in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D.M_REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines allocated in the M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D.M_SNOOP_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D snoop eviction of cache lines in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D.REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache lines allocated"
+ },
+ {
+ "EventCode": "0x52",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D prefetch load lock accepted in fill buffer"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_PREFETCH.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch misses"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_PREFETCH.REQUESTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_PREFETCH.TRIGGERS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests triggered"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1D_WB_L2.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in E state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D_WB_L2.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in I state (misses)"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L1D_WB_L2.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in M state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L1D_WB_L2.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L1 writebacks to L2"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_WB_L2.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_DATA_RQSTS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data prefetches"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the S state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines alloacated"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the E state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the S state"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_LINES_OUT.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.IFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.IFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.IFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.LD_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_RQSTS.LD_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.LOADS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xaa",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PREFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PREFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.PREFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 prefetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.RFOS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO requests"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANSACTIONS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANSACTIONS.FILL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 fill transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANSACTIONS.IFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANSACTIONS.L1D_WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D writeback to L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANSACTIONS.LOAD",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 Load transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANSACTIONS.PREFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANSACTIONS.RFO",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANSACTIONS.WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 writeback to LLC transactions"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_WRITE.LOCK.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in E state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe0",
+ "EventName": "L2_WRITE.LOCK.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_WRITE.LOCK.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_WRITE.LOCK.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_WRITE.LOCK.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_WRITE.LOCK.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in S state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "L2_WRITE.RFO.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_WRITE.RFO.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_WRITE.RFO.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_WRITE.RFO.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_WRITE.RFO.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in S state"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Longest latency cache miss"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Longest latency cache reference"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_INST_RETIRED.LOADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contains a load (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_INST_RETIRED.STORES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contains a store (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "OFFCORE_REQUESTS.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ANY.READ",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore read requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS.ANY.RFO",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND.READ_CODE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code read requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS.DEMAND.READ_DATA",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data read requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND.RFO",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore L1 data cache writebacks"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore reads"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore reads busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore demand code reads"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore demand code read busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore demand data reads"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore demand data read busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore demand RFOs"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore demand RFOs busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_SQ_FULL",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests blocked due to Super Queue full"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SQ_MISC.LRU_HINTS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue LRU hints sent to LLC"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue lock splits across a cache line"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "STORE_BLOCKS.AT_RET",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Loads delayed with at-Retirement block code"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "STORE_BLOCKS.L1D_BLOCK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Cacheable loads delayed with L1D block code"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x0",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x400",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100",
+ "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1000",
+ "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5",
+ "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x800",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50",
+ "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "500",
+ "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5000",
+ "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "3",
+ "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50000",
+ "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x1000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20",
+ "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "200",
+ "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x2000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10",
+ "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x111",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x211",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x411",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x711",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x811",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f44",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff44",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x144",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x444",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x744",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x844",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x50ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7fff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xffff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x80ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f22",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff22",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x222",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x422",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x722",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x822",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x108",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x208",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x408",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x708",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x808",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f77",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff77",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x177",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x277",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x477",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x777",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x877",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f33",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff33",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x133",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x233",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x433",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x733",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x833",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x403",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x703",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x803",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f01",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff01",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x101",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x201",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x401",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x701",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x801",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f04",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff04",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x104",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x204",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x404",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x704",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x804",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f02",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff02",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x102",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x202",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x402",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x702",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x802",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x180",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x280",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x480",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x780",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x880",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f50",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff50",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x150",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x250",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x450",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x750",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x850",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x110",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x210",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x410",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x710",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x810",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f40",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff40",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x140",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x240",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x440",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x740",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x840",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x120",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x220",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x420",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x720",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x820",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ALL_LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = ALL_LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7f70",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = ANY_CACHE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xff70",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = ANY_LOCATION",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = IO_CSR_MMIO",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x170",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = LLC_HIT_NO_OTHER_CORE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x270",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x470",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = LLC_HIT_OTHER_CORE_HITM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x770",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = LOCAL_CACHE",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = LOCAL_DRAM AND REMOTE_CACHE_HIT",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x870",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = REMOTE_CACHE_HITM",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
new file mode 100644
index 000000000000..7d2f71a9dee3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json
@@ -0,0 +1,229 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_ASSIST.ALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.INPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.OUTPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_COMP_OPS_EXE.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MMX Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP double precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE and SSE2 FP Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP packed Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP scalar Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP single precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE2 integer Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Computational floating-point operations executed"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "FP_MMX_TRANS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All Floating Point to and from MMX transitions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_MMX_TRANS.TO_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from MMX to Floating Point instructions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_MMX_TRANS.TO_MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from Floating Point to MMX instructions"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_128.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer pack operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_128.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer arithmetic operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_128.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer logical operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_128.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer multiply operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_128.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shift operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_128.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shuffle/move operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_128.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer unpack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_64.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit pack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_64.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit arithmetic operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_64.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit logical operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_64.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit packed multiply operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_64.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shift operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_64.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shuffle/move operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_64.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit unpack operations"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json
new file mode 100644
index 000000000000..e5e21e03444d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json
@@ -0,0 +1,26 @@
+[
+ {
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions decoded"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.FUSIONS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused instructions decoded"
+ },
+ {
+ "EventCode": "0x19",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TWO_UOP_INSTS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Two Uop instructions decoded"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json
new file mode 100644
index 000000000000..6e0829b7617f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json
@@ -0,0 +1,758 @@
+[
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Misaligned store references"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf811",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_DATA read and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf844",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY IFETCH and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x30ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf8ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x40ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20ff",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf822",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = ANY RFO and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf808",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = CORE_WB and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf877",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IFETCH and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf833",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DATA_IN and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf803",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf801",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_DATA_RD and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf804",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf802",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf880",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = OTHER and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf850",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf810",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+        "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf840",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+        "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+        "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+        "BriefDescription": "REQUEST = PF_IFETCH and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+        "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf820",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+        "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+        "BriefDescription": "REQUEST = PF_RFO and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+        "BriefDescription": "REQUEST = PF_RFO and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x3070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM_AND_REMOTE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = ANY_DRAM AND REMOTE_FWD",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xf870",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = ANY_LLC_MISS",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.OTHER_LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = OTHER_LOCAL_DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "REQUEST = PREFETCH and RESPONSE = REMOTE_DRAM",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
new file mode 100644
index 000000000000..85133d6a5ce0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/other.json
@@ -0,0 +1,287 @@
+[
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+        "BriefDescription": "Early Branch Prediction Unit clears"
+ },
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Late Branch Prediction Unit clears"
+ },
+ {
+ "EventCode": "0xE5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch prediction unit missed call or return"
+ },
+ {
+ "EventCode": "0xD5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ES_REG_RENAMES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ES segment renames"
+ },
+ {
+ "EventCode": "0x6C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IO_TRANSACTIONS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "I/O transactions"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch stall cycles"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch hits"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch misses"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I Instruction fetches"
+ },
+ {
+ "EventCode": "0x82",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Large ITLB hit"
+ },
+ {
+ "EventCode": "0x3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_BLOCK.OVERLAP_STORE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Loads that partially overlap an earlier store"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "LOAD_DISPATCH.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All loads dispatched"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "LOAD_DISPATCH.MOB",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_DISPATCH.RS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched that bypass the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_DISPATCH.RS_DELAYED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from stage 305"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PARTIAL_ADDRESS_ALIAS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "False dependencies due to partial address aliasing"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All RAT stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Flag stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Partial register stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+        "BriefDescription": "ROB read port stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Scoreboard stall cycles"
+ },
+ {
+ "EventCode": "0x4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "SB_DRAIN.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All Store buffer stall cycles"
+ },
+ {
+ "EventCode": "0xD4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SEG_RENAME_STALLS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Segment rename stall cycles"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SNOOP_RESPONSE.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HIT to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SNOOP_RESPONSE.HITE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITE to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SNOOP_RESPONSE.HITM",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITM to snoop"
+ },
+ {
+ "EventCode": "0xB4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SNOOPQ_REQUESTS.CODE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Snoop code requests"
+ },
+ {
+ "EventCode": "0xB4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SNOOPQ_REQUESTS.DATA",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Snoop data requests"
+ },
+ {
+ "EventCode": "0xB4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SNOOPQ_REQUESTS.INVALIDATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Snoop invalidate requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x4",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding snoop code requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x4",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles snoop code requests queued",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x1",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding snoop data requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x1",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles snoop data requests queued",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x2",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding snoop invalidate requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x2",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles snoop invalidate requests queued",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xF6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SQ_FULL_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue full stall cycles"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json
new file mode 100644
index 000000000000..f130510f7616
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json
@@ -0,0 +1,899 @@
+[
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.CYCLES_DIV_BUSY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles the divider is busy"
+ },
+ {
+ "EventCode": "0x14",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.DIV",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Divide Operations executed",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ARITH.MUL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Multiply operations executed"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BACLEAR.BAD_TARGET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEAR asserted with bad target address"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR.CLEAR",
+ "SampleAfterValue": "2000000",
+        "BriefDescription": "BACLEAR asserted, regardless of cause"
+ },
+ {
+ "EventCode": "0xA7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR_FORCE_IQ",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction queue forced BACLEAR"
+ },
+ {
+ "EventCode": "0xE0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch instructions decoded"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_INST_EXEC.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_EXEC.COND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Conditional branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_EXEC.DIRECT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Unconditional branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Unconditional call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_INST_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_INST_EXEC.NON_CALLS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect return branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_EXEC.TAKEN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired conditional branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Retired near call instructions (Precise Event)"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_MISP_EXEC.ANY",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_EXEC.COND",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted conditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_EXEC.DIRECT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted unconditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+        "BriefDescription": "Mispredicted unconditional call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_MISP_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_MISP_EXEC.NON_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_MISP_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted return branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_MISP_EXEC.TAKEN",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted retired branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted conditional retired branches (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted near retired calls (Precise Event)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reference cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_P",
+ "SampleAfterValue": "100000",
+        "BriefDescription": "Reference base clock (133 MHz) cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total CPU cycles",
+ "CounterMask": "2"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "ILD_STALL.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Any Instruction Length Decoder stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction Queue full stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Length Change Prefix stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ILD_STALL.MRU",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stall cycles due to BPU MRU bypass"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "ILD_STALL.REGEN",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Regen stall cycles"
+ },
+ {
+ "EventCode": "0x18",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_DECODED.DEC0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions that must be decoded by decoder 0"
+ },
+ {
+ "EventCode": "0x1E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles instructions are written to the instruction queue"
+ },
+ {
+ "EventCode": "0x17",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions written to instruction queue."
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (fixed counter)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (Programmable counter and Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "INST_RETIRED.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired MMX instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired floating-point operations (Precise Event)"
+ },
+ {
+ "EventCode": "0x4C",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Load operations conflicting with software prefetches"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xA8",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.INACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD_OVERFLOW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loops that can't stream from the instruction queue"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Cycles machine clear asserted"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEM_ORDER",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Self-Modifying Code detected"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "RESOURCE_STALLS.FPCW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "FPU control word write stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RESOURCE_STALLS.LOAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Load buffer stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "RESOURCE_STALLS.MXCSR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MXCSR rename stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "RESOURCE_STALLS.OTHER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Other Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ROB full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reservation Station full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.STORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Store buffer stall cycles"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)"
+ },
+ {
+ "EventCode": "0xDB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOP_UNFUSION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uop unfusions due to FP exceptions"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DECODED.ESP_FOLDING",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer instructions decoded"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DECODED.ESP_SYNC",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer sync operations"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops decoded by Microcode Sequencer",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xD1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DECODED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops are decoded",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on any port (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.PORT0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 0"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 0, 1 or 5"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.PORT1",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT2_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 2 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT234_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 2, 3 or 4"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT3_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 3 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT4_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 4 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED.PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 5"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued on any thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops were issued on either thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_ISSUED.FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Fused Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are being retired",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retirement slots used (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
new file mode 100644
index 000000000000..57b53562e2bd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json
@@ -0,0 +1,173 @@
+[
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_LOAD_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load misses"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "DTLB_LOAD_MISSES.LARGE_WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss large page walks"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_LOAD_MISSES.PDE_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss caused by low part of address"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "DTLB second level hit"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss page walks complete"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_LOAD_MISSES.WALK_CYCLES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss page walk cycles"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB misses"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "DTLB_MISSES.LARGE_WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB miss large page walks"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_MISSES.PDE_MISS",
+ "SampleAfterValue": "200000",
+        "BriefDescription": "DTLB misses caused by low part of address"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB first level misses but second level hit"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB miss page walks"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_MISSES.WALK_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "DTLB miss page walk cycles"
+ },
+ {
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Extended Page Table walk cycles"
+ },
+ {
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_FLUSH",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ITLB flushes"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISS_RETIRED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "ITLB_MISSES.LARGE_WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss large page walks"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss page walks"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ITLB miss page walk cycles"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss the DTLB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_STORE_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired stores that miss the DTLB (Precise Event)"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
new file mode 100644
index 000000000000..dad20f0e3cac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json
@@ -0,0 +1,3233 @@
+[
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "CACHE_LOCK_CYCLES.L1D",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D locked"
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CACHE_LOCK_CYCLES.L1D_L2",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D and L2 locked"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D.M_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines replaced in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D.M_REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines allocated in the M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D.M_SNOOP_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D snoop eviction of cache lines in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D.REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache lines allocated"
+ },
+ {
+ "EventCode": "0x52",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D prefetch load lock accepted in fill buffer"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_PREFETCH.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch misses"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_PREFETCH.REQUESTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_PREFETCH.TRIGGERS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests triggered"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1D_WB_L2.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in E state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D_WB_L2.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in I state (misses)"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L1D_WB_L2.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in M state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L1D_WB_L2.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L1 writebacks to L2"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_WB_L2.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_DATA_RQSTS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data prefetches"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the S state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines alloacated"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the E state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the S state"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_LINES_OUT.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.IFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.IFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.IFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.LD_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_RQSTS.LD_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.LOADS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xaa",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PREFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PREFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.PREFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 prefetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.RFOS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO requests"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANSACTIONS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANSACTIONS.FILL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 fill transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANSACTIONS.IFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANSACTIONS.L1D_WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D writeback to L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANSACTIONS.LOAD",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 Load transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANSACTIONS.PREFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANSACTIONS.RFO",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANSACTIONS.WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 writeback to LLC transactions"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_WRITE.LOCK.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in E state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe0",
+ "EventName": "L2_WRITE.LOCK.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_WRITE.LOCK.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_WRITE.LOCK.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_WRITE.LOCK.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_WRITE.LOCK.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in S state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "L2_WRITE.RFO.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_WRITE.RFO.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_WRITE.RFO.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_WRITE.RFO.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_WRITE.RFO.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in S state"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Longest latency cache miss"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Longest latency cache reference"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_INST_RETIRED.LOADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contains a load (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_INST_RETIRED.STORES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contains a store (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_UNCORE_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Load instructions retired with a data source of local DRAM or locally homed remote hitm (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_UNCORE_RETIRED.OTHER_CORE_L2_HITM",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Load instructions retired that HIT modified data in sibling core (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Load instructions retired remote cache HIT data source (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MEM_UNCORE_RETIRED.REMOTE_DRAM",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Load instructions retired remote DRAM and remote home-remote cache HITM (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "MEM_UNCORE_RETIRED.UNCACHEABLE",
+ "SampleAfterValue": "4000",
+ "BriefDescription": "Load instructions retired IO (Precise Event)"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "OFFCORE_REQUESTS.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ANY.READ",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore read requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS.ANY.RFO",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND.READ_CODE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code read requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS.DEMAND.READ_DATA",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data read requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND.RFO",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore L1 data cache writebacks"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "OFFCORE_REQUESTS.UNCACHED_MEM",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore uncached memory accesses"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore reads"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore reads busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore demand code reads"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore demand code read busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore demand data reads"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore demand data read busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore demand RFOs"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore demand RFOs busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_SQ_FULL",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests blocked due to Super Queue full"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SQ_MISC.LRU_HINTS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue LRU hints sent to LLC"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue lock splits across a cache line"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "STORE_BLOCKS.AT_RET",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Loads delayed with at-Retirement block code"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "STORE_BLOCKS.L1D_BLOCK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Cacheable loads delayed with L1D block code"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x0",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x400",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100",
+ "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1000",
+ "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5",
+ "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x800",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50",
+ "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "500",
+ "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5000",
+ "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "3",
+ "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50000",
+ "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x1000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20",
+ "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "200",
+ "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x2000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10",
+ "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF11",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x111",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x211",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x411",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x711",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2711",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1811",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5811",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x811",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F44",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF44",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x144",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x244",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x444",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x744",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2744",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1844",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5844",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x844",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7FFF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFFFF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x80FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x27FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x18FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x58FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x10FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F22",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF22",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x122",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x222",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x422",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x722",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2722",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1822",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5822",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x822",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF08",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore writebacks",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x108",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x408",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x708",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2708",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1808",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5808",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x808",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F77",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF77",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore code or data read requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x177",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x277",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x477",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x777",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2777",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1877",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5877",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x877",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F33",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any cache_dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF33",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any location",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x133",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x233",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x433",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x733",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = local cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2733",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = local cache or dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1833",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5833",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = remote cache or dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches that HIT in a remote cache ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x833",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF03",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand data requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x103",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x203",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x403",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x703",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2703",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1803",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5803",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x803",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F01",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF01",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x101",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x201",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x401",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x701",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2701",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1801",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5801",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x801",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F04",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF04",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x104",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x204",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x404",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x704",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2704",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1804",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5804",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x804",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F02",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF02",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x102",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x202",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x402",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x702",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2702",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1802",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5802",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x802",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore other requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x180",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x280",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x480",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x780",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2780",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1880",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5880",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x880",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F50",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF50",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch data requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x150",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x250",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x450",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x750",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2750",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1850",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5850",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x850",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x110",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x210",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x410",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x710",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2710",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1810",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5810",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x810",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F40",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF40",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x140",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x240",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x440",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x740",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2740",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1840",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5840",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x840",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x120",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x220",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x420",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x720",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2720",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1820",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5820",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x820",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x7F70",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xFF70",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x8070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x170",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x270",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x470",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x770",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2770",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1870",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x5870",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x1070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x870",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
new file mode 100644
index 000000000000..7d2f71a9dee3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json
@@ -0,0 +1,229 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_ASSIST.ALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.INPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists for invalid input value (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.OUTPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_COMP_OPS_EXE.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MMX Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP double precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE and SSE2 FP Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP packed Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP scalar Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP single precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE2 integer Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Computational floating-point operations executed"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "FP_MMX_TRANS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All Floating Point to and from MMX transitions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_MMX_TRANS.TO_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from MMX to Floating Point instructions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_MMX_TRANS.TO_MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from Floating Point to MMX instructions"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_128.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer pack operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_128.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer arithmetic operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_128.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer logical operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_128.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer multiply operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_128.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shift operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_128.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shuffle/move operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_128.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer unpack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_64.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit pack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_64.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit arithmetic operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_64.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit logical operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_64.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit packed multiply operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_64.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shift operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_64.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shuffle/move operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_64.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit unpack operations"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json
new file mode 100644
index 000000000000..e5e21e03444d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json
@@ -0,0 +1,26 @@
+[
+ {
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions decoded"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.FUSIONS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused instructions decoded"
+ },
+ {
+ "EventCode": "0x19",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TWO_UOP_INSTS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Two Uop instructions decoded"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json
new file mode 100644
index 000000000000..90eb6aac357b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json
@@ -0,0 +1,739 @@
+[
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF811",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4011",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF844",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4044",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x60FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF8FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x20FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x40FF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF822",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4022",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF808",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4008",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF877",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4077",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF833",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4033",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFOs and prefetches satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF803",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4003",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF801",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4001",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF804",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4004",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF802",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4002",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF880",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4080",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF850",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4050",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF810",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4010",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF840",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4040",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF820",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4020",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x6070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0xF870",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x2070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "MSRValue": "0x4070",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
new file mode 100644
index 000000000000..85133d6a5ce0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/other.json
@@ -0,0 +1,287 @@
+[
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Early Branch Prediciton Unit clears"
+ },
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Late Branch Prediction Unit clears"
+ },
+ {
+ "EventCode": "0xE5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch prediction unit missed call or return"
+ },
+ {
+ "EventCode": "0xD5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ES_REG_RENAMES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ES segment renames"
+ },
+ {
+ "EventCode": "0x6C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IO_TRANSACTIONS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "I/O transactions"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch stall cycles"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch hits"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch misses"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I Instruction fetches"
+ },
+ {
+ "EventCode": "0x82",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Large ITLB hit"
+ },
+ {
+ "EventCode": "0x3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_BLOCK.OVERLAP_STORE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Loads that partially overlap an earlier store"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "LOAD_DISPATCH.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All loads dispatched"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "LOAD_DISPATCH.MOB",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_DISPATCH.RS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched that bypass the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_DISPATCH.RS_DELAYED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from stage 305"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PARTIAL_ADDRESS_ALIAS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "False dependencies due to partial address aliasing"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All RAT stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Flag stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Partial register stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ROB read port stalls cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Scoreboard stall cycles"
+ },
+ {
+ "EventCode": "0x4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "SB_DRAIN.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All Store buffer stall cycles"
+ },
+ {
+ "EventCode": "0xD4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SEG_RENAME_STALLS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Segment rename stall cycles"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SNOOP_RESPONSE.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HIT to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SNOOP_RESPONSE.HITE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITE to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SNOOP_RESPONSE.HITM",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITM to snoop"
+ },
+ {
+ "EventCode": "0xB4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SNOOPQ_REQUESTS.CODE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Snoop code requests"
+ },
+ {
+ "EventCode": "0xB4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SNOOPQ_REQUESTS.DATA",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Snoop data requests"
+ },
+ {
+ "EventCode": "0xB4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SNOOPQ_REQUESTS.INVALIDATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Snoop invalidate requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x4",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding snoop code requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x4",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles snoop code requests queued",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x1",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding snoop data requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x1",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles snoop data requests queued",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x2",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding snoop invalidate requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x2",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles snoop invalidate requests queued",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xF6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SQ_FULL_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue full stall cycles"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json
new file mode 100644
index 000000000000..f130510f7616
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json
@@ -0,0 +1,899 @@
+[
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.CYCLES_DIV_BUSY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles the divider is busy"
+ },
+ {
+ "EventCode": "0x14",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.DIV",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Divide Operations executed",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ARITH.MUL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Multiply operations executed"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BACLEAR.BAD_TARGET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEAR asserted with bad target address"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR.CLEAR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEAR asserted, regardless of cause "
+ },
+ {
+ "EventCode": "0xA7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR_FORCE_IQ",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction queue forced BACLEAR"
+ },
+ {
+ "EventCode": "0xE0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch instructions decoded"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_INST_EXEC.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_EXEC.COND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Conditional branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_EXEC.DIRECT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Unconditional branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Unconditional call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_INST_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_INST_EXEC.NON_CALLS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect return branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_EXEC.TAKEN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired conditional branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Retired near call instructions (Precise Event)"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_MISP_EXEC.ANY",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_EXEC.COND",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted conditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_EXEC.DIRECT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted unconditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_MISP_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_MISP_EXEC.NON_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_MISP_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted return branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_MISP_EXEC.TAKEN",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted retired branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted conditional retired branches (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted near retired calls (Precise Event)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reference cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_P",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total CPU cycles",
+ "CounterMask": "2"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "ILD_STALL.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Any Instruction Length Decoder stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction Queue full stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Length Change Prefix stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ILD_STALL.MRU",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stall cycles due to BPU MRU bypass"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "ILD_STALL.REGEN",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Regen stall cycles"
+ },
+ {
+ "EventCode": "0x18",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_DECODED.DEC0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions that must be decoded by decoder 0"
+ },
+ {
+ "EventCode": "0x1E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles instructions are written to the instruction queue"
+ },
+ {
+ "EventCode": "0x17",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions written to instruction queue."
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (fixed counter)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (Programmable counter and Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "INST_RETIRED.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired MMX instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired floating-point operations (Precise Event)"
+ },
+ {
+ "EventCode": "0x4C",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Load operations conflicting with software prefetches"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xA8",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.INACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD_OVERFLOW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loops that can't stream from the instruction queue"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Cycles machine clear asserted"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEM_ORDER",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Self-Modifying Code detected"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "RESOURCE_STALLS.FPCW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "FPU control word write stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RESOURCE_STALLS.LOAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Load buffer stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "RESOURCE_STALLS.MXCSR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MXCSR rename stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "RESOURCE_STALLS.OTHER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Other Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ROB full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reservation Station full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.STORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Store buffer stall cycles"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)"
+ },
+ {
+ "EventCode": "0xDB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOP_UNFUSION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uop unfusions due to FP exceptions"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DECODED.ESP_FOLDING",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer instructions decoded"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DECODED.ESP_SYNC",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer sync operations"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops decoded by Microcode Sequencer",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xD1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DECODED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops are decoded",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on any port (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.PORT0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 0"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 0, 1 or 5"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.PORT1",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT2_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 2 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT234_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 2, 3 or 4"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT3_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 3 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT4_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 4 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED.PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 5"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued on any thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops were issued on either thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_ISSUED.FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Fused Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are being retired",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retirement slots used (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
new file mode 100644
index 000000000000..2153b3f5d7b0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json
@@ -0,0 +1,149 @@
+[
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_LOAD_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load misses"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_LOAD_MISSES.PDE_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss caused by low part of address"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "DTLB second level hit"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss page walks complete"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_LOAD_MISSES.WALK_CYCLES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss page walk cycles"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB misses"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "DTLB_MISSES.LARGE_WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB miss large page walks"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB first level misses but second level hit"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB miss page walks"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_MISSES.WALK_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "DTLB miss page walk cycles"
+ },
+ {
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Extended Page Table walk cycles"
+ },
+ {
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_FLUSH",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ITLB flushes"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISS_RETIRED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss page walks"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ITLB miss page walk cycles"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss the DTLB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_STORE_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired stores that miss the DTLB (Precise Event)"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/cache.json b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
new file mode 100644
index 000000000000..f9bc7fdd48d6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereex/cache.json
@@ -0,0 +1,3225 @@
+[
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "CACHE_LOCK_CYCLES.L1D",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D locked"
+ },
+ {
+ "EventCode": "0x63",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "CACHE_LOCK_CYCLES.L1D_L2",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles L1D and L2 locked"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D.M_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines replaced in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D.M_REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D cache lines allocated in the M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x8",
+ "EventName": "L1D.M_SNOOP_EVICT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D snoop eviction of cache lines in M state"
+ },
+ {
+ "EventCode": "0x51",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D.REPL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1 data cache lines allocated"
+ },
+ {
+ "EventCode": "0x52",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1D prefetch load lock accepted in fill buffer"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x2",
+ "EventName": "L1D_PREFETCH.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch misses"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "L1D_PREFETCH.REQUESTS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests"
+ },
+ {
+ "EventCode": "0x4E",
+ "Counter": "0,1",
+ "UMask": "0x4",
+ "EventName": "L1D_PREFETCH.TRIGGERS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D hardware prefetch requests triggered"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1D_WB_L2.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in E state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1D_WB_L2.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in I state (misses)"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L1D_WB_L2.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in M state"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L1D_WB_L2.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L1 writebacks to L2"
+ },
+ {
+ "EventCode": "0x28",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1D_WB_L2.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L1 writebacks to L2 in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_DATA_RQSTS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand requests"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data demand loads in S state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in E state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the I state (misses)"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in M state"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 data prefetches"
+ },
+ {
+ "EventCode": "0x26",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 data prefetches in the S state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "L2_LINES_IN.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines alloacated"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_IN.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the E state"
+ },
+ {
+ "EventCode": "0xF1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_IN.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines allocated in the S state"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_LINES_OUT.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a demand request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0xF2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 modified lines evicted by a prefetch request"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_RQSTS.IFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_RQSTS.IFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "L2_RQSTS.IFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_RQSTS.LD_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_RQSTS.LD_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 load misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L2_RQSTS.LOADS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xaa",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_RQSTS.PREFETCH_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_RQSTS.PREFETCH_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc0",
+ "EventName": "L2_RQSTS.PREFETCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 prefetches"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xff",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 requests"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO hits"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO misses"
+ },
+ {
+ "EventCode": "0x24",
+ "Counter": "0,1,2,3",
+ "UMask": "0xc",
+ "EventName": "L2_RQSTS.RFOS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO requests"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_TRANSACTIONS.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_TRANSACTIONS.FILL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 fill transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L2_TRANSACTIONS.IFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 instruction fetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_TRANSACTIONS.L1D_WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L1D writeback to L2 transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_TRANSACTIONS.LOAD",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 Load transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_TRANSACTIONS.PREFETCH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 prefetch transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_TRANSACTIONS.RFO",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 RFO transactions"
+ },
+ {
+ "EventCode": "0xF0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_TRANSACTIONS.WB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "L2 writeback to LLC transactions"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "L2_WRITE.LOCK.E_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in E state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe0",
+ "EventName": "L2_WRITE.LOCK.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "L2_WRITE.LOCK.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "L2_WRITE.LOCK.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf0",
+ "EventName": "L2_WRITE.LOCK.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All demand L2 lock RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "L2_WRITE.LOCK.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand lock RFOs in S state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xe",
+ "EventName": "L2_WRITE.RFO.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs that hit the cache"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L2_WRITE.RFO.I_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in I state (misses)"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "L2_WRITE.RFO.M_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in M state"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "L2_WRITE.RFO.MESI",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All L2 demand store RFOs"
+ },
+ {
+ "EventCode": "0x27",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L2_WRITE.RFO.S_STATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "L2 demand store RFOs in S state"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x41",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Longest latency cache miss"
+ },
+ {
+ "EventCode": "0x2E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4f",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Longest latency cache reference"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_INST_RETIRED.LOADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contains a load (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_INST_RETIRED.STORES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired which contains a store (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MEM_UNCORE_RETIRED.LOCAL_HITM",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Load instructions retired that HIT modified data in sibling core (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "MEM_UNCORE_RETIRED.LOCAL_DRAM_AND_REMOTE_CACHE_HIT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Load instructions retired local dram and remote cache HIT data sources (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "MEM_UNCORE_RETIRED.REMOTE_DRAM",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Load instructions retired remote DRAM and remote home-remote cache HITM (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "MEM_UNCORE_RETIRED.UNCACHEABLE",
+ "SampleAfterValue": "4000",
+ "BriefDescription": "Load instructions retired IO (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MEM_UNCORE_RETIRED.REMOTE_HITM",
+ "SampleAfterValue": "40000",
+ "BriefDescription": "Retired loads that hit remote socket in modified state (Precise Event)"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "OFFCORE_REQUESTS.ANY",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS.ANY.READ",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore read requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "OFFCORE_REQUESTS.ANY.RFO",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS.DEMAND.READ_CODE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code read requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS.DEMAND.READ_DATA",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data read requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS.DEMAND.RFO",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests"
+ },
+ {
+ "EventCode": "0xB0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore L1 data cache writebacks"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore reads"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ANY.READ_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore reads busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore demand code reads"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_CODE_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore demand code read busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore demand data reads"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.READ_DATA_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore demand data read busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding offcore demand RFOs"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND.RFO_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles offcore demand RFOs busy",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_REQUESTS_SQ_FULL",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests blocked due to Super Queue full"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SQ_MISC.LRU_HINTS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue LRU hints sent to LLC"
+ },
+ {
+ "EventCode": "0xF4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue lock splits across a cache line"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "STORE_BLOCKS.AT_RET",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Loads delayed with at-Retirement block code"
+ },
+ {
+ "EventCode": "0x6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "STORE_BLOCKS.L1D_BLOCK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Cacheable loads delayed with L1D block code"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x0",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x400",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "100",
+ "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x80",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "1000",
+ "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x10",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10000",
+ "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5",
+ "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x800",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50",
+ "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x100",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "500",
+ "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x20",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "5000",
+ "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "3",
+ "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x4",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "50000",
+ "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x1000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20",
+ "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x200",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "200",
+ "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x40",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x8",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xB",
+ "MSRValue": "0x2000",
+ "Counter": "3",
+ "UMask": "0x10",
+ "EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
+ "MSRIndex": "0x3F6",
+ "SampleAfterValue": "10",
+ "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F11",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF11",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x111",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x211",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x411",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x711",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4711",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F44",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF44",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x144",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x244",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x444",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x744",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4744",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7FFF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFFFF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x80FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x47FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x18FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x38FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x10FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F22",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF22",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x122",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x222",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x422",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x722",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4722",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F08",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF08",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore writebacks",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x108",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x408",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x708",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4708",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F77",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF77",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore code or data read requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x177",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x277",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x477",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x777",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4777",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F33",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any cache_dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF33",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any location",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x133",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x233",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x433",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x733",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = local cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4733",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = local cache or dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = remote cache or dram",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches that HIT in a remote cache ",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F03",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF03",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand data requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x103",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x203",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x403",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x703",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4703",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F01",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF01",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x101",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x201",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x401",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x701",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4701",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F04",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF04",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x104",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x204",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x404",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x704",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4704",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F02",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF02",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore demand RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x102",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x202",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x402",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x702",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4702",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F80",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF80",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore other requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x180",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x280",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x480",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x780",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4780",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F30",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF30",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch data requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x130",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x230",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x430",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x730",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4730",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F10",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF10",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch data reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x110",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x210",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x410",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x710",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4710",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F40",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF40",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch code reads",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x140",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x240",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x440",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x740",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4740",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F20",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF20",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch RFO requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x120",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x220",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x420",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x720",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4720",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x7F70",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xFF70",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "All offcore prefetch requests",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x8070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x170",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x270",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x470",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x770",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4770",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x3870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x1070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
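The OFFCORE_RESPONSE.* entries above all program MSR 0x1A6 with the MSRValue shown. A minimal sketch (Python, not part of this patch) of how those encodings decompose; the bit names are inferred from the event names and MSRValues in this file, so treat them as illustrative rather than authoritative:

# Sketch only: decompose a Westmere-EX OFFCORE_RESPONSE MSRValue (written to
# MSR 0x1A6) into its request-type (low byte) and response-type (high byte)
# bits.  Bit names are inferred from the event names/values in this file.
REQUEST_BITS = {
    0: "DEMAND_DATA_RD", 1: "DEMAND_RFO", 2: "DEMAND_IFETCH", 3: "COREWB",
    4: "PF_DATA_RD", 5: "PF_RFO", 6: "PF_IFETCH", 7: "OTHER",
}
RESPONSE_BITS = {
    8: "LLC_HIT_NO_OTHER_CORE", 9: "LLC_HIT_OTHER_CORE_HIT",
    10: "LLC_HIT_OTHER_CORE_HITM", 11: "REMOTE_CACHE_HITM",
    12: "REMOTE_CACHE_HIT", 13: "REMOTE_DRAM", 14: "LOCAL_DRAM",
    15: "IO_CSR_MMIO",
}

def decode(msr_value):
    """Return (request bit names, response bit names) set in msr_value."""
    reqs = [n for b, n in REQUEST_BITS.items() if msr_value & (1 << b)]
    rsps = [n for b, n in RESPONSE_BITS.items() if msr_value & (1 << b)]
    return reqs, rsps

# OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM above uses MSRValue 0x870:
print(decode(0x870))
# -> (['PF_DATA_RD', 'PF_RFO', 'PF_IFETCH'], ['REMOTE_CACHE_HITM'])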
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json b/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
new file mode 100644
index 000000000000..7d2f71a9dee3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereex/floating-point.json
@@ -0,0 +1,229 @@
+[
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_ASSIST.ALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_ASSIST.INPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xF7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_ASSIST.OUTPUT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_COMP_OPS_EXE.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MMX Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP double precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE and SSE2 FP Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP packed Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE FP scalar Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE* FP single precision Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "SSE2 integer Uops"
+ },
+ {
+ "EventCode": "0x10",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Computational floating-point operations executed"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "FP_MMX_TRANS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All Floating Point to and from MMX transitions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "FP_MMX_TRANS.TO_FP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from MMX to Floating Point instructions"
+ },
+ {
+ "EventCode": "0xCC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "FP_MMX_TRANS.TO_MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Transitions from Floating Point to MMX instructions"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_128.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer pack operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_128.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer arithmetic operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_128.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer logical operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_128.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer multiply operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_128.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shift operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_128.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer shuffle/move operations"
+ },
+ {
+ "EventCode": "0x12",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_128.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "128 bit SIMD integer unpack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SIMD_INT_64.PACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit pack operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "SIMD_INT_64.PACKED_ARITH",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit arithmetic operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SIMD_INT_64.PACKED_LOGICAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit logical operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SIMD_INT_64.PACKED_MPY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit packed multiply operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SIMD_INT_64.PACKED_SHIFT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shift operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "SIMD_INT_64.SHUFFLE_MOVE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit shuffle/move operations"
+ },
+ {
+ "EventCode": "0xFD",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SIMD_INT_64.UNPACK",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD integer 64 bit unpack operations"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/frontend.json b/tools/perf/pmu-events/arch/x86/westmereex/frontend.json
new file mode 100644
index 000000000000..e5e21e03444d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereex/frontend.json
@@ -0,0 +1,26 @@
+[
+ {
+ "EventCode": "0xD0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions decoded"
+ },
+ {
+ "EventCode": "0xA6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACRO_INSTS.FUSIONS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused instructions decoded"
+ },
+ {
+ "EventCode": "0x19",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "TWO_UOP_INSTS_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Two Uop instructions decoded"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/memory.json b/tools/perf/pmu-events/arch/x86/westmereex/memory.json
new file mode 100644
index 000000000000..3ba555e73cbd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereex/memory.json
@@ -0,0 +1,747 @@
+[
+ {
+ "EventCode": "0x5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MISALIGN_MEM_REF.STORE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Misaligned store references"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF811",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2011",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF844",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2044",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x60FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF8FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x40FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x20FF",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF822",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2022",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF808",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2008",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore writebacks to a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF877",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2077",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF833",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore request = all data, response = any LLC miss",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the local DRAM.",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2033",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF803",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2003",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF801",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2001",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF804",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2004",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF802",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2002",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF880",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2080",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF830",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2030",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF810",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2010",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF840",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2040",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF820",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2020",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x6070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0xF870",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests that missed the LLC",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x4070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
+ "Offcore": "1"
+ },
+ {
+ "EventCode": "0xB7",
+ "MSRValue": "0x2070",
+ "Counter": "2",
+ "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
+ "MSRIndex": "0x1A6",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
+ "Offcore": "1"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/other.json b/tools/perf/pmu-events/arch/x86/westmereex/other.json
new file mode 100644
index 000000000000..85133d6a5ce0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereex/other.json
@@ -0,0 +1,287 @@
+[
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Early Branch Prediciton Unit clears"
+ },
+ {
+ "EventCode": "0xE8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Late Branch Prediction Unit clears"
+ },
+ {
+ "EventCode": "0xE5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch prediction unit missed call or return"
+ },
+ {
+ "EventCode": "0xD5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ES_REG_RENAMES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ES segment renames"
+ },
+ {
+ "EventCode": "0x6C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "IO_TRANSACTIONS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "I/O transactions"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "L1I.CYCLES_STALLED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch stall cycles"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "L1I.HITS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch hits"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "L1I.MISSES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I instruction fetch misses"
+ },
+ {
+ "EventCode": "0x80",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3",
+ "EventName": "L1I.READS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "L1I Instruction fetches"
+ },
+ {
+ "EventCode": "0x82",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LARGE_ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Large ITLB hit"
+ },
+ {
+ "EventCode": "0x3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_BLOCK.OVERLAP_STORE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Loads that partially overlap an earlier store"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "LOAD_DISPATCH.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All loads dispatched"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "LOAD_DISPATCH.MOB",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LOAD_DISPATCH.RS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched that bypass the MOB"
+ },
+ {
+ "EventCode": "0x13",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "LOAD_DISPATCH.RS_DELAYED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loads dispatched from stage 305"
+ },
+ {
+ "EventCode": "0x7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "PARTIAL_ADDRESS_ALIAS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "False dependencies due to partial address aliasing"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "All RAT stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Flag stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Partial register stall cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ROB read port stalls cycles"
+ },
+ {
+ "EventCode": "0xD2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Scoreboard stall cycles"
+ },
+ {
+ "EventCode": "0x4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "SB_DRAIN.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All Store buffer stall cycles"
+ },
+ {
+ "EventCode": "0xD4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SEG_RENAME_STALLS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Segment rename stall cycles"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SNOOP_RESPONSE.HIT",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HIT to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SNOOP_RESPONSE.HITE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITE to snoop"
+ },
+ {
+ "EventCode": "0xB8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SNOOP_RESPONSE.HITM",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Thread responded HITM to snoop"
+ },
+ {
+ "EventCode": "0xB4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SNOOPQ_REQUESTS.CODE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Snoop code requests"
+ },
+ {
+ "EventCode": "0xB4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SNOOPQ_REQUESTS.DATA",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Snoop data requests"
+ },
+ {
+ "EventCode": "0xB4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SNOOPQ_REQUESTS.INVALIDATE",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Snoop invalidate requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x4",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding snoop code requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x4",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.CODE_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles snoop code requests queued",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x1",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding snoop data requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x1",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.DATA_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles snoop data requests queued",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x2",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Outstanding snoop invalidate requests"
+ },
+ {
+ "EventCode": "0xB3",
+ "UMask": "0x2",
+ "EventName": "SNOOPQ_REQUESTS_OUTSTANDING.INVALIDATE_NOT_EMPTY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles snoop invalidate requests queued",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xF6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SQ_FULL_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Super Queue full stall cycles"
+ }
+] \ No newline at end of file
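A minimal standalone sanity check (Python, not perf's own jevents build tooling) for one of the JSON files added above; the required-key set is an assumption based on the fields that every entry in these files carries:

import json

# Sketch only: verify each event entry in a pmu-events JSON file carries the
# fields used throughout this patch.  Keys like "Counter" or "MSRValue" are
# optional here, since some entries above omit them.
REQUIRED = {"EventCode", "UMask", "EventName", "SampleAfterValue", "BriefDescription"}

def check(path):
    with open(path) as f:
        events = json.load(f)
    for ev in events:
        missing = REQUIRED - ev.keys()
        if missing:
            print("%s: missing %s" % (ev.get("EventName", "<unnamed>"), sorted(missing)))

check("tools/perf/pmu-events/arch/x86/westmereex/other.json")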
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json b/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
new file mode 100644
index 000000000000..799c57d94c39
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereex/pipeline.json
@@ -0,0 +1,905 @@
+[
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.CYCLES_DIV_BUSY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles the divider is busy"
+ },
+ {
+ "EventCode": "0x14",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ARITH.DIV",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Divide Operations executed",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0x14",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ARITH.MUL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Multiply operations executed"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BACLEAR.BAD_TARGET",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEAR asserted with bad target address"
+ },
+ {
+ "EventCode": "0xE6",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR.CLEAR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "BACLEAR asserted, regardless of cause "
+ },
+ {
+ "EventCode": "0xA7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BACLEAR_FORCE_IQ",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction queue forced BACLEAR"
+ },
+ {
+ "EventCode": "0xE0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_DECODED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Branch instructions decoded"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_INST_EXEC.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_EXEC.COND",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Conditional branch instructions executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_EXEC.DIRECT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Unconditional branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Unconditional call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_INST_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_INST_EXEC.NON_CALLS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "All non call branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_INST_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Indirect return branches executed"
+ },
+ {
+ "EventCode": "0x88",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_INST_EXEC.TAKEN",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired conditional branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC4",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Retired near call instructions (Precise Event)"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7f",
+ "EventName": "BR_MISP_EXEC.ANY",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_EXEC.COND",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted conditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_EXEC.DIRECT",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted unconditional branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted indirect non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x30",
+ "EventName": "BR_MISP_EXEC.NEAR_CALLS",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x7",
+ "EventName": "BR_MISP_EXEC.NON_CALLS",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted non call branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "BR_MISP_EXEC.RETURN_NEAR",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted return branches executed"
+ },
+ {
+ "EventCode": "0x89",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "BR_MISP_EXEC.TAKEN",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted taken branches executed"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted retired branch instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Mispredicted conditional retired branches (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC5",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "SampleAfterValue": "2000",
+ "BriefDescription": "Mispredicted near retired calls (Precise Event)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reference cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_P",
+ "SampleAfterValue": "100000",
+ "BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 2",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (fixed counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when thread is not halted (programmable counter)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total CPU cycles",
+ "CounterMask": "2"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0xf",
+ "EventName": "ILD_STALL.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Any Instruction Length Decoder stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instruction Queue full stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Length Change Prefix stall cycles"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ILD_STALL.MRU",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stall cycles due to BPU MRU bypass"
+ },
+ {
+ "EventCode": "0x87",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "ILD_STALL.REGEN",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Regen stall cycles"
+ },
+ {
+ "EventCode": "0x18",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_DECODED.DEC0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions that must be decoded by decoder 0"
+ },
+ {
+ "EventCode": "0x1E",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles instructions are written to the instruction queue"
+ },
+ {
+ "EventCode": "0x17",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_QUEUE_WRITES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions written to instruction queue."
+ },
+ {
+ "EventCode": "0x0",
+ "Counter": "Fixed counter 1",
+ "UMask": "0x0",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (fixed counter)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Instructions retired (Programmable counter and Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "INST_RETIRED.MMX",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired MMX instructions (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC0",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "INST_RETIRED.X87",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retired floating-point operations (Precise Event)"
+ },
+ {
+ "EventCode": "0x4C",
+ "Counter": "0,1",
+ "UMask": "0x1",
+ "EventName": "LOAD_HIT_PRE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Load operations conflicting with software prefetches"
+ },
+ {
+ "EventCode": "0xA8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles when uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xA8",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD.INACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no uops were delivered by the LSD",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0x20",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "LSD_OVERFLOW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Loops that can't stream from the instruction queue"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Cycles machine clear asserted"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "MACHINE_CLEARS.MEM_ORDER",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts"
+ },
+ {
+ "EventCode": "0xC3",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20000",
+ "BriefDescription": "Self-Modifying Code detected"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "RESOURCE_STALLS.FPCW",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "FPU control word write stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "RESOURCE_STALLS.LOAD",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Load buffer stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "RESOURCE_STALLS.MXCSR",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "MXCSR rename stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "RESOURCE_STALLS.OTHER",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Other Resource related stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "RESOURCE_STALLS.ROB_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ROB full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.RS_FULL",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Reservation Station full stall cycles"
+ },
+ {
+ "EventCode": "0xA2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "RESOURCE_STALLS.STORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Store buffer stall cycles"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC7",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)"
+ },
+ {
+ "EventCode": "0x3C",
+ "Counter": "0,1,2,3",
+ "UMask": "0x0",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles thread is active"
+ },
+ {
+ "EventCode": "0xDB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOP_UNFUSION",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uop unfusions due to FP exceptions"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_DECODED.ESP_FOLDING",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer instructions decoded"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "EventName": "UOPS_DECODED.ESP_SYNC",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Stack pointer sync operations"
+ },
+ {
+ "EventCode": "0xD1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops decoded by Microcode Sequencer",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xD1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_DECODED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops are decoded",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on any port (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x3f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1f",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_EXECUTED.PORT0",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 0"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 0, 1 or 5"
+ },
+ {
+ "EventCode": "0xB1",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x40",
+ "EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_EXECUTED.PORT1",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 1"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT2_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 2 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT234_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued on ports 2, 3 or 4"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x8",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT3_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 3 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "AnyThread": "1",
+ "EventName": "UOPS_EXECUTED.PORT4_CORE",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 4 (core count)"
+ },
+ {
+ "EventCode": "0xB1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "UOPS_EXECUTED.PORT5",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops executed on port 5"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued on any thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "AnyThread": "1",
+ "EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops were issued on either thread",
+ "CounterMask": "1"
+ },
+ {
+ "EventCode": "0xE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_ISSUED.FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Fused Uops issued"
+ },
+ {
+ "EventCode": "0xE",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles no Uops were issued",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are being retired",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.ANY",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Macro-fused Uops retired (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Retirement slots used (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "CounterMask": "1"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC2",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "CounterMask": "16"
+ },
+ {
+ "PEBS": "2",
+ "EventCode": "0xC0",
+ "Invert": "1",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16"
+ }
+]
\ No newline at end of file
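The entries above follow the schema perf uses for all pmu-events JSON: EventCode and UMask select the hardware event, Counter lists the general-purpose counters the event may be scheduled on, SampleAfterValue is the default sampling period, and optional fields such as Invert, CounterMask, EdgeDetect, AnyThread and PEBS qualify the encoding. As an illustration only (this helper is not part of the patch), the two hex fields combine into a raw config usable with PERF_TYPE_RAW roughly like this:

#include <stdint.h>

/*
 * Illustrative helper, not from the patch: the usual x86 core-PMU layout
 * places the unit mask in bits 15:8 and the event select in bits 7:0 of
 * the raw config passed to a PERF_TYPE_RAW event.
 */
static inline uint64_t intel_raw_config(uint8_t event_code, uint8_t umask)
{
	return ((uint64_t)umask << 8) | event_code;
}

/* Example: BR_MISP_EXEC.ANY above has EventCode 0x89, UMask 0x7f -> 0x7f89 */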
diff --git a/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
new file mode 100644
index 000000000000..ad989207e8f8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json
@@ -0,0 +1,173 @@
+[
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_LOAD_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load misses"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "DTLB_LOAD_MISSES.LARGE_WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss large page walks"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_LOAD_MISSES.PDE_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss caused by low part of address"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "DTLB second level hit"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss page walks complete"
+ },
+ {
+ "EventCode": "0x8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_LOAD_MISSES.WALK_CYCLES",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB load miss page walk cycles"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "DTLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB misses"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "DTLB_MISSES.LARGE_WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB miss large page walks"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "DTLB_MISSES.PDE_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB misses caused by low part of address. Count also includes 2M page references because 2M pages do not use the PDE."
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "DTLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB first level misses but second level hit"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "DTLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "DTLB miss page walks"
+ },
+ {
+ "EventCode": "0x49",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "DTLB_MISSES.WALK_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "DTLB miss page walk cycles"
+ },
+ {
+ "EventCode": "0x4F",
+ "Counter": "0,1,2,3",
+ "UMask": "0x10",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "Extended Page Table walk cycles"
+ },
+ {
+ "EventCode": "0xAE",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_FLUSH",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ITLB flushes"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC8",
+ "Counter": "0,1,2,3",
+ "UMask": "0x20",
+ "EventName": "ITLB_MISS_RETIRED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "ITLB_MISSES.ANY",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "ITLB_MISSES.LARGE_WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss large page walks"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x2",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "ITLB miss page walks"
+ },
+ {
+ "EventCode": "0x85",
+ "Counter": "0,1,2,3",
+ "UMask": "0x4",
+ "EventName": "ITLB_MISSES.WALK_CYCLES",
+ "SampleAfterValue": "2000000",
+ "BriefDescription": "ITLB miss page walk cycles"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xCB",
+ "Counter": "0,1,2,3",
+ "UMask": "0x80",
+ "EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired loads that miss the DTLB (Precise Event)"
+ },
+ {
+ "PEBS": "1",
+ "EventCode": "0xC",
+ "Counter": "0,1,2,3",
+ "UMask": "0x1",
+ "EventName": "MEM_STORE_RETIRED.DTLB_MISS",
+ "SampleAfterValue": "200000",
+ "BriefDescription": "Retired stores that miss the DTLB (Precise Event)"
+ }
+]
\ No newline at end of file
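Any of these events can be opened directly through perf_event_open() with PERF_TYPE_RAW once encoded as above. The sketch below is not part of the patch and only illustrates the idea: it counts DTLB_LOAD_MISSES.ANY from this table (EventCode 0x8, UMask 0x1, so config 0x0108) for the calling thread.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x0108;		/* DTLB_LOAD_MISSES.ANY: umask 0x1 << 8 | event 0x8 */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* perf_event_open() has no glibc wrapper; measure this thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* the workload under test would run here */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("dTLB load misses: %" PRIu64 "\n", count);
	close(fd);
	return 0;
}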
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 8a4ce492f7b2..6676c2dd6dcb 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -42,6 +42,8 @@ perf-y += backward-ring-buffer.o
perf-y += sdt.o
perf-y += is_printable_array.o
perf-y += bitmap.o
+perf-y += perf-hooks.o
+perf-y += clang.o
$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
$(call rule_mkdir)
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
index e6d1816e431a..42e892b1e979 100644
--- a/tools/perf/tests/backward-ring-buffer.c
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -97,7 +97,7 @@ int test__backward_ring_buffer(int subtest __maybe_unused)
evlist = perf_evlist__new();
if (!evlist) {
- pr_debug("No enough memory to create evlist\n");
+ pr_debug("Not enough memory to create evlist\n");
return TEST_FAIL;
}
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 2673e86ed50f..92343f43e44a 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -57,7 +57,7 @@ static struct {
} bpf_testcase_table[] = {
{
LLVM_TESTCASE_BASE,
- "Test basic BPF filtering",
+ "Basic BPF filtering",
"[basic_bpf_test]",
"fix 'perf test LLVM' first",
"load bpf object failed",
@@ -67,7 +67,7 @@ static struct {
#ifdef HAVE_BPF_PROLOGUE
{
LLVM_TESTCASE_BPF_PROLOGUE,
- "Test BPF prologue generation",
+ "BPF prologue generation",
"[bpf_prologue_test]",
"fix kbuild first",
"check your vmlinux setting?",
@@ -77,7 +77,7 @@ static struct {
#endif
{
LLVM_TESTCASE_BPF_RELOCATION,
- "Test BPF relocation checker",
+ "BPF relocation checker",
"[bpf_relocation_test]",
"fix 'perf test LLVM' first",
"libbpf error when dealing with relocation",
@@ -125,7 +125,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
/* Instead of perf_evlist__new_default, don't add default events */
evlist = perf_evlist__new();
if (!evlist) {
- pr_debug("No enough memory to create evlist\n");
+ pr_debug("Not enough memory to create evlist\n");
return TEST_FAIL;
}
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 778668a2a966..23605202d4a1 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -28,119 +28,119 @@ static struct test generic_tests[] = {
.func = test__vmlinux_matches_kallsyms,
},
{
- .desc = "detect openat syscall event",
+ .desc = "Detect openat syscall event",
.func = test__openat_syscall_event,
},
{
- .desc = "detect openat syscall event on all cpus",
+ .desc = "Detect openat syscall event on all cpus",
.func = test__openat_syscall_event_on_all_cpus,
},
{
- .desc = "read samples using the mmap interface",
+ .desc = "Read samples using the mmap interface",
.func = test__basic_mmap,
},
{
- .desc = "parse events tests",
+ .desc = "Parse event definition strings",
.func = test__parse_events,
},
{
- .desc = "Validate PERF_RECORD_* events & perf_sample fields",
+ .desc = "PERF_RECORD_* events & perf_sample fields",
.func = test__PERF_RECORD,
},
{
- .desc = "Test perf pmu format parsing",
+ .desc = "Parse perf pmu format",
.func = test__pmu,
},
{
- .desc = "Test dso data read",
+ .desc = "DSO data read",
.func = test__dso_data,
},
{
- .desc = "Test dso data cache",
+ .desc = "DSO data cache",
.func = test__dso_data_cache,
},
{
- .desc = "Test dso data reopen",
+ .desc = "DSO data reopen",
.func = test__dso_data_reopen,
},
{
- .desc = "roundtrip evsel->name check",
+ .desc = "Roundtrip evsel->name",
.func = test__perf_evsel__roundtrip_name_test,
},
{
- .desc = "Check parsing of sched tracepoints fields",
+ .desc = "Parse sched tracepoints fields",
.func = test__perf_evsel__tp_sched_test,
},
{
- .desc = "Generate and check syscalls:sys_enter_openat event fields",
+ .desc = "syscalls:sys_enter_openat event fields",
.func = test__syscall_openat_tp_fields,
},
{
- .desc = "struct perf_event_attr setup",
+ .desc = "Setup struct perf_event_attr",
.func = test__attr,
},
{
- .desc = "Test matching and linking multiple hists",
+ .desc = "Match and link multiple hists",
.func = test__hists_link,
},
{
- .desc = "Try 'import perf' in python, checking link problems",
+ .desc = "'import perf' in python",
.func = test__python_use,
},
{
- .desc = "Test breakpoint overflow signal handler",
+ .desc = "Breakpoint overflow signal handler",
.func = test__bp_signal,
},
{
- .desc = "Test breakpoint overflow sampling",
+ .desc = "Breakpoint overflow sampling",
.func = test__bp_signal_overflow,
},
{
- .desc = "Test number of exit event of a simple workload",
+ .desc = "Number of exit events of a simple workload",
.func = test__task_exit,
},
{
- .desc = "Test software clock events have valid period values",
+ .desc = "Software clock events period values",
.func = test__sw_clock_freq,
},
{
- .desc = "Test object code reading",
+ .desc = "Object code reading",
.func = test__code_reading,
},
{
- .desc = "Test sample parsing",
+ .desc = "Sample parsing",
.func = test__sample_parsing,
},
{
- .desc = "Test using a dummy software event to keep tracking",
+ .desc = "Use a dummy software event to keep tracking",
.func = test__keep_tracking,
},
{
- .desc = "Test parsing with no sample_id_all bit set",
+ .desc = "Parse with no sample_id_all bit set",
.func = test__parse_no_sample_id_all,
},
{
- .desc = "Test filtering hist entries",
+ .desc = "Filter hist entries",
.func = test__hists_filter,
},
{
- .desc = "Test mmap thread lookup",
+ .desc = "Lookup mmap thread",
.func = test__mmap_thread_lookup,
},
{
- .desc = "Test thread mg sharing",
+ .desc = "Share thread mg",
.func = test__thread_mg_share,
},
{
- .desc = "Test output sorting of hist entries",
+ .desc = "Sort output of hist entries",
.func = test__hists_output,
},
{
- .desc = "Test cumulation of child hist entries",
+ .desc = "Cumulate child hist entries",
.func = test__hists_cumulate,
},
{
- .desc = "Test tracking with sched_switch",
+ .desc = "Track with sched_switch",
.func = test__switch_tracking,
},
{
@@ -152,15 +152,15 @@ static struct test generic_tests[] = {
.func = test__fdarray__add,
},
{
- .desc = "Test kmod_path__parse function",
+ .desc = "kmod_path__parse",
.func = test__kmod_path__parse,
},
{
- .desc = "Test thread map",
+ .desc = "Thread map",
.func = test__thread_map,
},
{
- .desc = "Test LLVM searching and compiling",
+ .desc = "LLVM search and compile",
.func = test__llvm,
.subtest = {
.skip_if_fail = true,
@@ -169,11 +169,11 @@ static struct test generic_tests[] = {
},
},
{
- .desc = "Test topology in session",
+ .desc = "Session topology",
.func = test_session_topology,
},
{
- .desc = "Test BPF filter",
+ .desc = "BPF filter",
.func = test__bpf,
.subtest = {
.skip_if_fail = true,
@@ -182,54 +182,67 @@ static struct test generic_tests[] = {
},
},
{
- .desc = "Test thread map synthesize",
+ .desc = "Synthesize thread map",
.func = test__thread_map_synthesize,
},
{
- .desc = "Test cpu map synthesize",
+ .desc = "Synthesize cpu map",
.func = test__cpu_map_synthesize,
},
{
- .desc = "Test stat config synthesize",
+ .desc = "Synthesize stat config",
.func = test__synthesize_stat_config,
},
{
- .desc = "Test stat synthesize",
+ .desc = "Synthesize stat",
.func = test__synthesize_stat,
},
{
- .desc = "Test stat round synthesize",
+ .desc = "Synthesize stat round",
.func = test__synthesize_stat_round,
},
{
- .desc = "Test attr update synthesize",
+ .desc = "Synthesize attr update",
.func = test__event_update,
},
{
- .desc = "Test events times",
+ .desc = "Event times",
.func = test__event_times,
},
{
- .desc = "Test backward reading from ring buffer",
+ .desc = "Read backward ring buffer",
.func = test__backward_ring_buffer,
},
{
- .desc = "Test cpu map print",
+ .desc = "Print cpu map",
.func = test__cpu_map_print,
},
{
- .desc = "Test SDT event probing",
+ .desc = "Probe SDT events",
.func = test__sdt_event,
},
{
- .desc = "Test is_printable_array function",
+ .desc = "is_printable_array",
.func = test__is_printable_array,
},
{
- .desc = "Test bitmap print",
+ .desc = "Print bitmap",
.func = test__bitmap_print,
},
{
+ .desc = "perf hooks",
+ .func = test__perf_hooks,
+ },
+ {
+ .desc = "builtin clang support",
+ .func = test__clang,
+ .subtest = {
+ .skip_if_fail = true,
+ .get_nr = test__clang_subtest_get_nr,
+ .get_desc = test__clang_subtest_get_desc,
+ }
+ },
+ {
.func = NULL,
},
};
diff --git a/tools/perf/tests/clang.c b/tools/perf/tests/clang.c
new file mode 100644
index 000000000000..f853e242a86c
--- /dev/null
+++ b/tools/perf/tests/clang.c
@@ -0,0 +1,46 @@
+#include "tests.h"
+#include "debug.h"
+#include "util.h"
+#include "c++/clang-c.h"
+
+static struct {
+ int (*func)(void);
+ const char *desc;
+} clang_testcase_table[] = {
+#ifdef HAVE_LIBCLANGLLVM_SUPPORT
+ {
+ .func = test__clang_to_IR,
+ .desc = "builtin clang compile C source to IR",
+ },
+ {
+ .func = test__clang_to_obj,
+ .desc = "builtin clang compile C source to ELF object",
+ },
+#endif
+};
+
+int test__clang_subtest_get_nr(void)
+{
+ return (int)ARRAY_SIZE(clang_testcase_table);
+}
+
+const char *test__clang_subtest_get_desc(int i)
+{
+ if (i < 0 || i >= (int)ARRAY_SIZE(clang_testcase_table))
+ return NULL;
+ return clang_testcase_table[i].desc;
+}
+
+#ifndef HAVE_LIBCLANGLLVM_SUPPORT
+int test__clang(int i __maybe_unused)
+{
+ return TEST_SKIP;
+}
+#else
+int test__clang(int i)
+{
+ if (i < 0 || i >= (int)ARRAY_SIZE(clang_testcase_table))
+ return TEST_FAIL;
+ return clang_testcase_table[i].func();
+}
+#endif
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
index b798a4bfd238..02a33ebcd992 100644
--- a/tools/perf/tests/llvm.c
+++ b/tools/perf/tests/llvm.c
@@ -34,19 +34,19 @@ static struct {
} bpf_source_table[__LLVM_TESTCASE_MAX] = {
[LLVM_TESTCASE_BASE] = {
.source = test_llvm__bpf_base_prog,
- .desc = "Basic BPF llvm compiling test",
+ .desc = "Basic BPF llvm compile",
},
[LLVM_TESTCASE_KBUILD] = {
.source = test_llvm__bpf_test_kbuild_prog,
- .desc = "Test kbuild searching",
+ .desc = "kbuild searching",
},
[LLVM_TESTCASE_BPF_PROLOGUE] = {
.source = test_llvm__bpf_test_prologue_prog,
- .desc = "Compile source for BPF prologue generation test",
+ .desc = "Compile source for BPF prologue generation",
},
[LLVM_TESTCASE_BPF_RELOCATION] = {
.source = test_llvm__bpf_test_relocation,
- .desc = "Compile source for BPF relocation test",
+ .desc = "Compile source for BPF relocation",
.should_load_fail = true,
},
};
diff --git a/tools/perf/tests/llvm.h b/tools/perf/tests/llvm.h
index 0eaa604be99d..b83571758d83 100644
--- a/tools/perf/tests/llvm.h
+++ b/tools/perf/tests/llvm.h
@@ -1,6 +1,10 @@
#ifndef PERF_TEST_LLVM_H
#define PERF_TEST_LLVM_H
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#include <stddef.h> /* for size_t */
#include <stdbool.h> /* for bool */
@@ -20,4 +24,7 @@ enum test_llvm__testcase {
int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz,
enum test_llvm__testcase index, bool force,
bool *should_load_fail);
+#ifdef __cplusplus
+}
+#endif
#endif
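The new guards are presumably needed because this C header is now reachable from the C++ side of the clang/LLVM glue built with CONFIG_CXX; without extern "C", a C++ translation unit would mangle the declaration of test_llvm__fetch_bpf_obj() and fail to link against its C definition. A generic illustration of the pattern (a hypothetical header, not a file from this series):

/* example.h - illustrative only, not a file from this patch */
#ifndef EXAMPLE_H
#define EXAMPLE_H

#ifdef __cplusplus
extern "C" {
#endif

/* declared with C linkage so C++ callers look up the unmangled symbol */
int fetch_obj(void **buf, unsigned long *size);

#ifdef __cplusplus
}
#endif

#endif /* EXAMPLE_H */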
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 143f4d549769..0784748f1670 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -83,6 +83,7 @@ make_no_libbpf := NO_LIBBPF=1
make_no_libcrypto := NO_LIBCRYPTO=1
make_with_babeltrace:= LIBBABELTRACE=1
make_no_sdt := NO_SDT=1
+make_with_clangllvm := LIBCLANGLLVM=1
make_tags := tags
make_cscope := cscope
make_help := help
@@ -106,7 +107,7 @@ make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
-make_minimal += NO_LIBCRYPTO=1 NO_SDT=1
+make_minimal += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1
# $(run) contains all available tests
run := make_pure
@@ -139,6 +140,7 @@ run += make_no_libbionic
run += make_no_auxtrace
run += make_no_libbpf
run += make_with_babeltrace
+run += make_with_clangllvm
run += make_help
run += make_doc
run += make_perf_o
@@ -278,7 +280,7 @@ endif
MAKEFLAGS := --no-print-directory
-clean := @(cd $(PERF); $(MAKE_F) -s $(O_OPT) clean >/dev/null)
+clean := @(cd $(PERF); $(MAKE_F) -s $(O_OPT) clean >/dev/null && $(MAKE) -s $(O_OPT) -C ../build clean >/dev/null)
$(run):
$(call clean)
diff --git a/tools/perf/tests/perf-hooks.c b/tools/perf/tests/perf-hooks.c
new file mode 100644
index 000000000000..665ecc19671c
--- /dev/null
+++ b/tools/perf/tests/perf-hooks.c
@@ -0,0 +1,48 @@
+#include <signal.h>
+#include <stdlib.h>
+
+#include "tests.h"
+#include "debug.h"
+#include "util.h"
+#include "perf-hooks.h"
+
+static void sigsegv_handler(int sig __maybe_unused)
+{
+ pr_debug("SIGSEGV is observed as expected, try to recover.\n");
+ perf_hooks__recover();
+ signal(SIGSEGV, SIG_DFL);
+ raise(SIGSEGV);
+ exit(-1);
+}
+
+
+static void the_hook(void *_hook_flags)
+{
+ int *hook_flags = _hook_flags;
+ int *p = NULL;
+
+ *hook_flags = 1234;
+
+ /* Generate a segfault, test perf_hooks__recover */
+ *p = 0;
+}
+
+int test__perf_hooks(int subtest __maybe_unused)
+{
+ int hook_flags = 0;
+
+ signal(SIGSEGV, sigsegv_handler);
+ perf_hooks__set_hook("test", the_hook, &hook_flags);
+ perf_hooks__invoke_test();
+
+ /* hook is triggered? */
+ if (hook_flags != 1234) {
+ pr_debug("Setting failed: %d (%p)\n", hook_flags, &hook_flags);
+ return TEST_FAIL;
+ }
+
+ /* the buggy hook is removed? */
+ if (perf_hooks__get_hook("test"))
+ return TEST_FAIL;
+ return TEST_OK;
+}
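This test leans on two properties of the hook layer implemented elsewhere (util/perf-hooks.c is not shown in this section): perf_hooks__recover() must be able to bail out of a hook that faults, and a crashed hook must end up deregistered so the final perf_hooks__get_hook("test") check sees NULL. The standalone sketch below shows one conventional way such recovery can be built on sigsetjmp()/siglongjmp(); all names in it are invented for illustration and it is not the patch's implementation.

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf hook_jmp;
static volatile sig_atomic_t hook_active;

static void recover_hook(void)
{
	if (hook_active)
		siglongjmp(hook_jmp, 1);
}

static void segv_handler(int sig)
{
	(void)sig;
	recover_hook();			/* jumps out if a hook is running */
	signal(SIGSEGV, SIG_DFL);	/* otherwise die normally */
	raise(SIGSEGV);
}

static void buggy_hook(void)
{
	*(volatile int *)0 = 0;		/* deliberate fault */
}

static void invoke_hook(void (*hook)(void))
{
	if (sigsetjmp(hook_jmp, 1) == 0) {
		hook_active = 1;
		hook();
	} else {
		fprintf(stderr, "hook crashed, recovered\n");
	}
	hook_active = 0;
}

int main(void)
{
	signal(SIGSEGV, segv_handler);
	invoke_hook(buggy_hook);
	puts("still alive");
	return 0;
}

As in the test above, the handler only falls back to the default SIGSEGV disposition when no hook is running, so an unrelated crash still terminates the process normally.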
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index 7c196c585472..0d7b251305af 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -91,6 +91,10 @@ int test__cpu_map_print(int subtest);
int test__sdt_event(int subtest);
int test__is_printable_array(int subtest);
int test__bitmap_print(int subtest);
+int test__perf_hooks(int subtest);
+int test__clang(int subtest);
+const char *test__clang_subtest_get_desc(int subtest);
+int test__clang_subtest_get_nr(void);
#if defined(__arm__) || defined(__aarch64__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 4c18271c71c9..ec7a30fad149 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -213,17 +213,17 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
ui_browser__write_nstring(browser, bf, printed);
if (change_color)
ui_browser__set_color(browser, color);
- if (dl->ins && dl->ins->ops->scnprintf) {
- if (ins__is_jump(dl->ins)) {
+ if (dl->ins.ops && dl->ins.ops->scnprintf) {
+ if (ins__is_jump(&dl->ins)) {
bool fwd = dl->ops.target.offset > (u64)dl->offset;
ui_browser__write_graph(browser, fwd ? SLSMG_DARROW_CHAR :
SLSMG_UARROW_CHAR);
SLsmg_write_char(' ');
- } else if (ins__is_call(dl->ins)) {
+ } else if (ins__is_call(&dl->ins)) {
ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
SLsmg_write_char(' ');
- } else if (ins__is_ret(dl->ins)) {
+ } else if (ins__is_ret(&dl->ins)) {
ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
SLsmg_write_char(' ');
} else {
@@ -243,7 +243,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
static bool disasm_line__is_valid_jump(struct disasm_line *dl, struct symbol *sym)
{
- if (!dl || !dl->ins || !ins__is_jump(dl->ins)
+ if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins)
|| !disasm_line__has_offset(dl)
|| dl->ops.target.offset >= symbol__size(sym))
return false;
@@ -492,7 +492,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
};
char title[SYM_TITLE_MAX_SIZE];
- if (!ins__is_call(dl->ins))
+ if (!ins__is_call(&dl->ins))
return false;
if (map_groups__find_ams(&target) ||
@@ -543,14 +543,16 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows
static bool annotate_browser__jump(struct annotate_browser *browser)
{
struct disasm_line *dl = browser->selection;
+ u64 offset;
s64 idx;
- if (!ins__is_jump(dl->ins))
+ if (!ins__is_jump(&dl->ins))
return false;
- dl = annotate_browser__find_offset(browser, dl->ops.target.offset, &idx);
+ offset = dl->ops.target.offset;
+ dl = annotate_browser__find_offset(browser, offset, &idx);
if (dl == NULL) {
- ui_helpline__puts("Invalid jump offset");
+ ui_helpline__printf("Invalid jump offset: %" PRIx64, offset);
return true;
}
@@ -841,9 +843,9 @@ show_help:
ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
else if (browser->selection->offset == -1)
ui_helpline__puts("Actions are only available for assembly lines.");
- else if (!browser->selection->ins)
+ else if (!browser->selection->ins.ops)
goto show_sup_ins;
- else if (ins__is_ret(browser->selection->ins))
+ else if (ins__is_ret(&browser->selection->ins))
goto out;
else if (!(annotate_browser__jump(browser) ||
annotate_browser__callq(browser, evsel, hbt))) {
@@ -1050,7 +1052,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
(nr_pcnt - 1);
}
- err = symbol__disassemble(sym, map, sizeof_bdl);
+ err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), sizeof_bdl);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a53fef0c673b..641b40234a9d 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -30,7 +30,7 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd,
static bool hist_browser__has_filter(struct hist_browser *hb)
{
- return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter;
+ return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter || hb->c2c_filter;
}
static int hist_browser__get_folding(struct hist_browser *browser)
@@ -738,6 +738,7 @@ static int hist_browser__show_callchain_list(struct hist_browser *browser,
struct callchain_print_arg *arg)
{
char bf[1024], *alloc_str;
+ char buf[64], *alloc_str2;
const char *str;
if (arg->row_offset != 0) {
@@ -746,12 +747,26 @@ static int hist_browser__show_callchain_list(struct hist_browser *browser,
}
alloc_str = NULL;
+ alloc_str2 = NULL;
+
str = callchain_list__sym_name(chain, bf, sizeof(bf),
browser->show_dso);
- if (need_percent) {
- char buf[64];
+ if (symbol_conf.show_branchflag_count) {
+ if (need_percent)
+ callchain_list_counts__printf_value(node, chain, NULL,
+ buf, sizeof(buf));
+ else
+ callchain_list_counts__printf_value(NULL, chain, NULL,
+ buf, sizeof(buf));
+
+ if (asprintf(&alloc_str2, "%s%s", str, buf) < 0)
+ str = "Not enough memory!";
+ else
+ str = alloc_str2;
+ }
+ if (need_percent) {
callchain_node__scnprintf_value(node, buf, sizeof(buf),
total);
@@ -764,6 +779,7 @@ static int hist_browser__show_callchain_list(struct hist_browser *browser,
print(browser, chain, str, offset, row, arg);
free(alloc_str);
+ free(alloc_str2);
return 1;
}
@@ -2833,7 +2849,10 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
do_zoom_dso(browser, actions);
continue;
case 'V':
- browser->show_dso = !browser->show_dso;
+ verbose = (verbose + 1) % 4;
+ browser->show_dso = verbose > 0;
+ ui_helpline__fpush("Verbosity level set to %d\n",
+ verbose);
continue;
case 't':
actions->thread = thread;
diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h
index 39bd0f28f211..23d6acb84800 100644
--- a/tools/perf/ui/browsers/hists.h
+++ b/tools/perf/ui/browsers/hists.h
@@ -18,6 +18,7 @@ struct hist_browser {
u64 nr_non_filtered_entries;
u64 nr_hierarchy_entries;
u64 nr_callchain_rows;
+ bool c2c_filter;
/* Get title string. */
int (*title)(struct hist_browser *browser,
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 42d319927762..8c9308ac30b7 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -167,7 +167,7 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
- err = symbol__disassemble(sym, map, 0);
+ err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
diff --git a/tools/perf/ui/helpline.c b/tools/perf/ui/helpline.c
index 5b74a7eba210..379039ab00d8 100644
--- a/tools/perf/ui/helpline.c
+++ b/tools/perf/ui/helpline.c
@@ -72,3 +72,13 @@ int ui_helpline__vshow(const char *fmt, va_list ap)
{
return helpline_fns->show(fmt, ap);
}
+
+void ui_helpline__printf(const char *fmt, ...)
+{
+ va_list ap;
+
+ ui_helpline__pop();
+ va_start(ap, fmt);
+ ui_helpline__vpush(fmt, ap);
+ va_end(ap);
+}
diff --git a/tools/perf/ui/helpline.h b/tools/perf/ui/helpline.h
index 46181f4fc07e..d52d0a1a881b 100644
--- a/tools/perf/ui/helpline.h
+++ b/tools/perf/ui/helpline.h
@@ -21,6 +21,7 @@ void ui_helpline__push(const char *msg);
void ui_helpline__vpush(const char *fmt, va_list ap);
void ui_helpline__fpush(const char *fmt, ...);
void ui_helpline__puts(const char *msg);
+void ui_helpline__printf(const char *fmt, ...);
int ui_helpline__vshow(const char *fmt, va_list ap);
extern char ui_helpline__current[512];
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 89d8441f9890..668f4aecf2e6 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -41,7 +41,9 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
{
int i;
size_t ret = 0;
- char bf[1024];
+ char bf[1024], *alloc_str = NULL;
+ char buf[64];
+ const char *str;
ret += callchain__fprintf_left_margin(fp, left_margin);
for (i = 0; i < depth; i++) {
@@ -56,8 +58,26 @@ static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
} else
ret += fprintf(fp, "%s", " ");
}
- fputs(callchain_list__sym_name(chain, bf, sizeof(bf), false), fp);
+
+ str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
+
+ if (symbol_conf.show_branchflag_count) {
+ if (!period)
+ callchain_list_counts__printf_value(node, chain, NULL,
+ buf, sizeof(buf));
+ else
+ callchain_list_counts__printf_value(NULL, chain, NULL,
+ buf, sizeof(buf));
+
+ if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
+ str = "Not enough memory!";
+ else
+ str = alloc_str;
+ }
+
+ fputs(str, fp);
fputc('\n', fp);
+ free(alloc_str);
return ret;
}
@@ -219,8 +239,15 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
} else
ret += callchain__fprintf_left_margin(fp, left_margin);
- ret += fprintf(fp, "%s\n", callchain_list__sym_name(chain, bf, sizeof(bf),
- false));
+ ret += fprintf(fp, "%s",
+ callchain_list__sym_name(chain, bf,
+ sizeof(bf),
+ false));
+
+ if (symbol_conf.show_branchflag_count)
+ ret += callchain_list_counts__printf_value(
+ NULL, chain, fp, NULL, 0);
+ ret += fprintf(fp, "\n");
if (++entries_printed == callchain_param.print_limit)
break;
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index eb60e613d795..3840e3a87057 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -87,6 +87,7 @@ libperf-y += help-unknown-cmd.o
libperf-y += mem-events.o
libperf-y += vsprintf.o
libperf-y += drv_configs.o
+libperf-y += time-utils.o
libperf-$(CONFIG_LIBBPF) += bpf-loader.o
libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
@@ -120,9 +121,13 @@ libperf-y += demangle-rust.o
ifdef CONFIG_JITDUMP
libperf-$(CONFIG_LIBELF) += jitdump.o
libperf-$(CONFIG_LIBELF) += genelf.o
-libperf-$(CONFIG_LIBELF) += genelf_debug.o
+libperf-$(CONFIG_DWARF) += genelf_debug.o
endif
+libperf-y += perf-hooks.o
+
+libperf-$(CONFIG_CXX) += c++/
+
CFLAGS_config.o += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
# avoid compiler warnings in 32-bit mode
CFLAGS_genelf_debug.o += -Wno-packed
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index aeb5a441bd74..ea7e0de4b9c1 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -18,16 +18,119 @@
#include "annotate.h"
#include "evsel.h"
#include "block-range.h"
+#include "arch/common.h"
#include <regex.h>
#include <pthread.h>
#include <linux/bitops.h>
+#include <sys/utsname.h>
const char *disassembler_style;
const char *objdump_path;
static regex_t file_lineno;
-static struct ins *ins__find(const char *name);
-static int disasm_line__parse(char *line, char **namep, char **rawp);
+static struct ins_ops *ins__find(struct arch *arch, const char *name);
+static void ins__sort(struct arch *arch);
+static int disasm_line__parse(char *line, const char **namep, char **rawp);
+
+struct arch {
+ const char *name;
+ struct ins *instructions;
+ size_t nr_instructions;
+ size_t nr_instructions_allocated;
+ struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name);
+ bool sorted_instructions;
+ bool initialized;
+ void *priv;
+ int (*init)(struct arch *arch);
+ struct {
+ char comment_char;
+ char skip_functions_char;
+ } objdump;
+};
+
+static struct ins_ops call_ops;
+static struct ins_ops dec_ops;
+static struct ins_ops jump_ops;
+static struct ins_ops mov_ops;
+static struct ins_ops nop_ops;
+static struct ins_ops lock_ops;
+static struct ins_ops ret_ops;
+
+static int arch__grow_instructions(struct arch *arch)
+{
+ struct ins *new_instructions;
+ size_t new_nr_allocated;
+
+ if (arch->nr_instructions_allocated == 0 && arch->instructions)
+ goto grow_from_non_allocated_table;
+
+ new_nr_allocated = arch->nr_instructions_allocated + 128;
+ new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
+ if (new_instructions == NULL)
+ return -1;
+
+out_update_instructions:
+ arch->instructions = new_instructions;
+ arch->nr_instructions_allocated = new_nr_allocated;
+ return 0;
+
+grow_from_non_allocated_table:
+ new_nr_allocated = arch->nr_instructions + 128;
+ new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
+ if (new_instructions == NULL)
+ return -1;
+
+ memcpy(new_instructions, arch->instructions, arch->nr_instructions);
+ goto out_update_instructions;
+}
+
+static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
+{
+ struct ins *ins;
+
+ if (arch->nr_instructions == arch->nr_instructions_allocated &&
+ arch__grow_instructions(arch))
+ return -1;
+
+ ins = &arch->instructions[arch->nr_instructions];
+ ins->name = strdup(name);
+ if (!ins->name)
+ return -1;
+
+ ins->ops = ops;
+ arch->nr_instructions++;
+
+ ins__sort(arch);
+ return 0;
+}
+
+#include "arch/arm/annotate/instructions.c"
+#include "arch/arm64/annotate/instructions.c"
+#include "arch/x86/annotate/instructions.c"
+#include "arch/powerpc/annotate/instructions.c"
+
+static struct arch architectures[] = {
+ {
+ .name = "arm",
+ .init = arm__annotate_init,
+ },
+ {
+ .name = "arm64",
+ .init = arm64__annotate_init,
+ },
+ {
+ .name = "x86",
+ .instructions = x86__instructions,
+ .nr_instructions = ARRAY_SIZE(x86__instructions),
+ .objdump = {
+ .comment_char = '#',
+ },
+ },
+ {
+ .name = "powerpc",
+ .init = powerpc__annotate_init,
+ },
+};
static void ins__delete(struct ins_operands *ops)
{
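Compared with the single, #ifdef-laden instructions[] array this patch removes further down, each architectures[] entry now either carries a static table (x86) or an init() hook that can set objdump quirks and an associate_instruction_ops() callback for classifying mnemonics on demand. The per-arch instructions.c files included above are outside this section; the fragment below is only a hypothetical example of such a callback, reusing the helpers defined in this hunk (the fake__* names are invented and assume the declarations above are in scope):

/* Hypothetical arch init callback, illustrative only. */
static struct ins_ops *fake__associate_ins_ops(struct arch *arch, const char *name)
{
	struct ins_ops *ops;

	if (!strcmp(name, "bl") || !strcmp(name, "blx"))
		ops = &call_ops;		/* branch-and-link: a call */
	else if (name[0] == 'b')
		ops = &jump_ops;		/* b, beq, bne, ...: jumps */
	else
		return NULL;

	/* cache the decision so the next lookup hits the sorted table */
	arch__associate_ins_ops(arch, name, ops);
	return ops;
}

static int fake__annotate_init(struct arch *arch)
{
	arch->associate_instruction_ops = fake__associate_ins_ops;
	arch->objdump.comment_char = ';';
	return 0;
}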
@@ -54,7 +157,7 @@ int ins__scnprintf(struct ins *ins, char *bf, size_t size,
return ins__raw_scnprintf(ins, bf, size, ops);
}
-static int call__parse(struct ins_operands *ops, struct map *map)
+static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
{
char *endptr, *tok, *name;
@@ -66,10 +169,9 @@ static int call__parse(struct ins_operands *ops, struct map *map)
name++;
-#ifdef __arm__
- if (strchr(name, '+'))
+ if (arch->objdump.skip_functions_char &&
+ strchr(name, arch->objdump.skip_functions_char))
return -1;
-#endif
tok = strchr(name, '>');
if (tok == NULL)
@@ -118,7 +220,7 @@ bool ins__is_call(const struct ins *ins)
return ins->ops == &call_ops;
}
-static int jump__parse(struct ins_operands *ops, struct map *map __maybe_unused)
+static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
{
const char *s = strchr(ops->raw, '+');
@@ -135,6 +237,9 @@ static int jump__parse(struct ins_operands *ops, struct map *map __maybe_unused)
static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops)
{
+ if (!ops->target.addr)
+ return ins__raw_scnprintf(ins, bf, size, ops);
+
return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
}
@@ -173,28 +278,22 @@ static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
return 0;
}
-static int lock__parse(struct ins_operands *ops, struct map *map)
+static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
{
- char *name;
-
ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
if (ops->locked.ops == NULL)
return 0;
- if (disasm_line__parse(ops->raw, &name, &ops->locked.ops->raw) < 0)
+ if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
goto out_free_ops;
- ops->locked.ins = ins__find(name);
- free(name);
+ ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
- if (ops->locked.ins == NULL)
+ if (ops->locked.ins.ops == NULL)
goto out_free_ops;
- if (!ops->locked.ins->ops)
- return 0;
-
- if (ops->locked.ins->ops->parse &&
- ops->locked.ins->ops->parse(ops->locked.ops, map) < 0)
+ if (ops->locked.ins.ops->parse &&
+ ops->locked.ins.ops->parse(arch, ops->locked.ops, map) < 0)
goto out_free_ops;
return 0;
@@ -209,19 +308,19 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
{
int printed;
- if (ops->locked.ins == NULL)
+ if (ops->locked.ins.ops == NULL)
return ins__raw_scnprintf(ins, bf, size, ops);
printed = scnprintf(bf, size, "%-6.6s ", ins->name);
- return printed + ins__scnprintf(ops->locked.ins, bf + printed,
+ return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
size - printed, ops->locked.ops);
}
static void lock__delete(struct ins_operands *ops)
{
- struct ins *ins = ops->locked.ins;
+ struct ins *ins = &ops->locked.ins;
- if (ins && ins->ops->free)
+ if (ins->ops && ins->ops->free)
ins->ops->free(ops->locked.ops);
else
ins__delete(ops->locked.ops);
@@ -237,7 +336,7 @@ static struct ins_ops lock_ops = {
.scnprintf = lock__scnprintf,
};
-static int mov__parse(struct ins_operands *ops, struct map *map __maybe_unused)
+static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *map __maybe_unused)
{
char *s = strchr(ops->raw, ','), *target, *comment, prev;
@@ -252,11 +351,7 @@ static int mov__parse(struct ins_operands *ops, struct map *map __maybe_unused)
return -1;
target = ++s;
-#ifdef __arm__
- comment = strchr(s, ';');
-#else
- comment = strchr(s, '#');
-#endif
+ comment = strchr(s, arch->objdump.comment_char);
if (comment != NULL)
s = comment - 1;
@@ -304,7 +399,7 @@ static struct ins_ops mov_ops = {
.scnprintf = mov__scnprintf,
};
-static int dec__parse(struct ins_operands *ops, struct map *map __maybe_unused)
+static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
{
char *target, *comment, *s, prev;
@@ -321,7 +416,7 @@ static int dec__parse(struct ins_operands *ops, struct map *map __maybe_unused)
if (ops->target.raw == NULL)
return -1;
- comment = strchr(s, '#');
+ comment = strchr(s, arch->objdump.comment_char);
if (comment == NULL)
return 0;
@@ -364,99 +459,6 @@ bool ins__is_ret(const struct ins *ins)
return ins->ops == &ret_ops;
}
-static struct ins instructions[] = {
- { .name = "add", .ops = &mov_ops, },
- { .name = "addl", .ops = &mov_ops, },
- { .name = "addq", .ops = &mov_ops, },
- { .name = "addw", .ops = &mov_ops, },
- { .name = "and", .ops = &mov_ops, },
-#ifdef __arm__
- { .name = "b", .ops = &jump_ops, }, // might also be a call
- { .name = "bcc", .ops = &jump_ops, },
- { .name = "bcs", .ops = &jump_ops, },
- { .name = "beq", .ops = &jump_ops, },
- { .name = "bge", .ops = &jump_ops, },
- { .name = "bgt", .ops = &jump_ops, },
- { .name = "bhi", .ops = &jump_ops, },
- { .name = "bl", .ops = &call_ops, },
- { .name = "bls", .ops = &jump_ops, },
- { .name = "blt", .ops = &jump_ops, },
- { .name = "blx", .ops = &call_ops, },
- { .name = "bne", .ops = &jump_ops, },
-#endif
- { .name = "bts", .ops = &mov_ops, },
- { .name = "call", .ops = &call_ops, },
- { .name = "callq", .ops = &call_ops, },
- { .name = "cmp", .ops = &mov_ops, },
- { .name = "cmpb", .ops = &mov_ops, },
- { .name = "cmpl", .ops = &mov_ops, },
- { .name = "cmpq", .ops = &mov_ops, },
- { .name = "cmpw", .ops = &mov_ops, },
- { .name = "cmpxch", .ops = &mov_ops, },
- { .name = "dec", .ops = &dec_ops, },
- { .name = "decl", .ops = &dec_ops, },
- { .name = "imul", .ops = &mov_ops, },
- { .name = "inc", .ops = &dec_ops, },
- { .name = "incl", .ops = &dec_ops, },
- { .name = "ja", .ops = &jump_ops, },
- { .name = "jae", .ops = &jump_ops, },
- { .name = "jb", .ops = &jump_ops, },
- { .name = "jbe", .ops = &jump_ops, },
- { .name = "jc", .ops = &jump_ops, },
- { .name = "jcxz", .ops = &jump_ops, },
- { .name = "je", .ops = &jump_ops, },
- { .name = "jecxz", .ops = &jump_ops, },
- { .name = "jg", .ops = &jump_ops, },
- { .name = "jge", .ops = &jump_ops, },
- { .name = "jl", .ops = &jump_ops, },
- { .name = "jle", .ops = &jump_ops, },
- { .name = "jmp", .ops = &jump_ops, },
- { .name = "jmpq", .ops = &jump_ops, },
- { .name = "jna", .ops = &jump_ops, },
- { .name = "jnae", .ops = &jump_ops, },
- { .name = "jnb", .ops = &jump_ops, },
- { .name = "jnbe", .ops = &jump_ops, },
- { .name = "jnc", .ops = &jump_ops, },
- { .name = "jne", .ops = &jump_ops, },
- { .name = "jng", .ops = &jump_ops, },
- { .name = "jnge", .ops = &jump_ops, },
- { .name = "jnl", .ops = &jump_ops, },
- { .name = "jnle", .ops = &jump_ops, },
- { .name = "jno", .ops = &jump_ops, },
- { .name = "jnp", .ops = &jump_ops, },
- { .name = "jns", .ops = &jump_ops, },
- { .name = "jnz", .ops = &jump_ops, },
- { .name = "jo", .ops = &jump_ops, },
- { .name = "jp", .ops = &jump_ops, },
- { .name = "jpe", .ops = &jump_ops, },
- { .name = "jpo", .ops = &jump_ops, },
- { .name = "jrcxz", .ops = &jump_ops, },
- { .name = "js", .ops = &jump_ops, },
- { .name = "jz", .ops = &jump_ops, },
- { .name = "lea", .ops = &mov_ops, },
- { .name = "lock", .ops = &lock_ops, },
- { .name = "mov", .ops = &mov_ops, },
- { .name = "movb", .ops = &mov_ops, },
- { .name = "movdqa",.ops = &mov_ops, },
- { .name = "movl", .ops = &mov_ops, },
- { .name = "movq", .ops = &mov_ops, },
- { .name = "movslq", .ops = &mov_ops, },
- { .name = "movzbl", .ops = &mov_ops, },
- { .name = "movzwl", .ops = &mov_ops, },
- { .name = "nop", .ops = &nop_ops, },
- { .name = "nopl", .ops = &nop_ops, },
- { .name = "nopw", .ops = &nop_ops, },
- { .name = "or", .ops = &mov_ops, },
- { .name = "orl", .ops = &mov_ops, },
- { .name = "test", .ops = &mov_ops, },
- { .name = "testb", .ops = &mov_ops, },
- { .name = "testl", .ops = &mov_ops, },
- { .name = "xadd", .ops = &mov_ops, },
- { .name = "xbeginl", .ops = &jump_ops, },
- { .name = "xbeginq", .ops = &jump_ops, },
- { .name = "retq", .ops = &ret_ops, },
-};
-
static int ins__key_cmp(const void *name, const void *insp)
{
const struct ins *ins = insp;
@@ -472,24 +474,70 @@ static int ins__cmp(const void *a, const void *b)
return strcmp(ia->name, ib->name);
}
-static void ins__sort(void)
+static void ins__sort(struct arch *arch)
+{
+ const int nmemb = arch->nr_instructions;
+
+ qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
+}
+
+static struct ins_ops *__ins__find(struct arch *arch, const char *name)
+{
+ struct ins *ins;
+ const int nmemb = arch->nr_instructions;
+
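+ /* Sort this arch's instruction table once, lazily, on first lookup. */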
+ if (!arch->sorted_instructions) {
+ ins__sort(arch);
+ arch->sorted_instructions = true;
+ }
+
+ ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
+ return ins ? ins->ops : NULL;
+}
+
+static struct ins_ops *ins__find(struct arch *arch, const char *name)
+{
+ struct ins_ops *ops = __ins__find(arch, name);
+
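+ /* Not in the table: let the arch-specific hook classify the mnemonic. */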
+ if (!ops && arch->associate_instruction_ops)
+ ops = arch->associate_instruction_ops(arch, name);
+
+ return ops;
+}
+
+static int arch__key_cmp(const void *name, const void *archp)
+{
+ const struct arch *arch = archp;
+
+ return strcmp(name, arch->name);
+}
+
+static int arch__cmp(const void *a, const void *b)
{
- const int nmemb = ARRAY_SIZE(instructions);
+ const struct arch *aa = a;
+ const struct arch *ab = b;
- qsort(instructions, nmemb, sizeof(struct ins), ins__cmp);
+ return strcmp(aa->name, ab->name);
}
-static struct ins *ins__find(const char *name)
+static void arch__sort(void)
{
- const int nmemb = ARRAY_SIZE(instructions);
+ const int nmemb = ARRAY_SIZE(architectures);
+
+ qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
+}
+
+static struct arch *arch__find(const char *name)
+{
+ const int nmemb = ARRAY_SIZE(architectures);
static bool sorted;
if (!sorted) {
- ins__sort();
+ arch__sort();
sorted = true;
}
- return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__key_cmp);
+ return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}
int symbol__alloc_hist(struct symbol *sym)
@@ -709,21 +757,18 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}
-static void disasm_line__init_ins(struct disasm_line *dl, struct map *map)
+static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map *map)
{
- dl->ins = ins__find(dl->name);
+ dl->ins.ops = ins__find(arch, dl->ins.name);
- if (dl->ins == NULL)
+ if (!dl->ins.ops)
return;
- if (!dl->ins->ops)
- return;
-
- if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops, map) < 0)
- dl->ins = NULL;
+ if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, map) < 0)
+ dl->ins.ops = NULL;
}
-static int disasm_line__parse(char *line, char **namep, char **rawp)
+static int disasm_line__parse(char *line, const char **namep, char **rawp)
{
char *name = line, tmp;
@@ -756,12 +801,14 @@ static int disasm_line__parse(char *line, char **namep, char **rawp)
return 0;
out_free_name:
- zfree(namep);
+ free((void *)*namep);
+ *namep = NULL;
return -1;
}
static struct disasm_line *disasm_line__new(s64 offset, char *line,
size_t privsize, int line_nr,
+ struct arch *arch,
struct map *map)
{
struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);
@@ -774,10 +821,10 @@ static struct disasm_line *disasm_line__new(s64 offset, char *line,
goto out_delete;
if (offset != -1) {
- if (disasm_line__parse(dl->line, &dl->name, &dl->ops.raw) < 0)
+ if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
goto out_free_line;
- disasm_line__init_ins(dl, map);
+ disasm_line__init_ins(dl, arch, map);
}
}
@@ -793,20 +840,21 @@ out_delete:
void disasm_line__free(struct disasm_line *dl)
{
zfree(&dl->line);
- zfree(&dl->name);
- if (dl->ins && dl->ins->ops->free)
- dl->ins->ops->free(&dl->ops);
+ if (dl->ins.ops && dl->ins.ops->free)
+ dl->ins.ops->free(&dl->ops);
else
ins__delete(&dl->ops);
+ free((void *)dl->ins.name);
+ dl->ins.name = NULL;
free(dl);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
- if (raw || !dl->ins)
- return scnprintf(bf, size, "%-6.6s %s", dl->name, dl->ops.raw);
+ if (raw || !dl->ins.ops)
+ return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);
- return ins__scnprintf(dl->ins, bf, size, &dl->ops);
+ return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}
static void disasm__add(struct list_head *head, struct disasm_line *line)
@@ -1087,6 +1135,7 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
* The ops.raw part will be parsed further according to type of the instruction.
*/
static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
+ struct arch *arch,
FILE *file, size_t privsize,
int *line_nr)
{
@@ -1149,7 +1198,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
parsed_line = tmp2 + 1;
}
- dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, map);
+ dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
free(line);
(*line_nr)++;
@@ -1161,7 +1210,7 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
map__rip_2objdump(map, sym->start);
/* kcore has no symbols, so add the call target name */
- if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
+ if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
struct addr_map_symbol target = {
.map = map,
.addr = dl->ops.target.addr,
@@ -1191,8 +1240,8 @@ static void delete_last_nop(struct symbol *sym)
while (!list_empty(list)) {
dl = list_entry(list->prev, struct disasm_line, node);
- if (dl->ins && dl->ins->ops) {
- if (dl->ins->ops != &nop_ops)
+ if (dl->ins.ops) {
+ if (dl->ins.ops != &nop_ops)
return;
} else {
if (!strstr(dl->line, " nop ") &&
@@ -1280,10 +1329,23 @@ fallback:
return 0;
}
-int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize)
+static const char *annotate__norm_arch(const char *arch_name)
+{
+ struct utsname uts;
+
+ if (!arch_name) { /* Assume we are annotating locally. */
+ if (uname(&uts) < 0)
+ return NULL;
+ arch_name = uts.machine;
+ }
+ return normalize_arch((char *)arch_name);
+}
+
+int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize)
{
struct dso *dso = map->dso;
char command[PATH_MAX * 2];
+ struct arch *arch = NULL;
FILE *file;
char symfs_filename[PATH_MAX];
struct kcore_extract kce;
@@ -1297,6 +1359,22 @@ int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize)
if (err)
return err;
+ arch_name = annotate__norm_arch(arch_name);
+ if (!arch_name)
+ return -1;
+
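+ /* Pick the instruction table/ops for the arch the perf.data came from. */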
+ arch = arch__find(arch_name);
+ if (arch == NULL)
+ return -ENOTSUP;
+
+ if (arch->init) {
+ err = arch->init(arch);
+ if (err) {
+ pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
+ return err;
+ }
+ }
+
pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
symfs_filename, sym->name, map->unmap_ip(map, sym->start),
map->unmap_ip(map, sym->end));
@@ -1395,7 +1473,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize)
nline = 0;
while (!feof(file)) {
- if (symbol__parse_objdump_line(sym, map, file, privsize,
+ if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
&lineno) < 0)
break;
nline++;
@@ -1764,7 +1842,7 @@ static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
if (dl->offset == -1)
return fprintf(fp, "%s\n", dl->line);
- printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->name);
+ printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);
if (dl->ops.raw[0] != '\0') {
printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
@@ -1793,7 +1871,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct rb_root source_line = RB_ROOT;
u64 len;
- if (symbol__disassemble(sym, map, 0) < 0)
+ if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0) < 0)
return -1;
len = symbol__size(sym);
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 5bbcec173b82..87e4cadc5d27 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -11,7 +11,12 @@
#include <linux/rbtree.h>
#include <pthread.h>
-struct ins;
+struct ins_ops;
+
+struct ins {
+ const char *name;
+ struct ins_ops *ops;
+};
struct ins_operands {
char *raw;
@@ -28,24 +33,21 @@ struct ins_operands {
u64 addr;
} source;
struct {
- struct ins *ins;
+ struct ins ins;
struct ins_operands *ops;
} locked;
};
};
+struct arch;
+
struct ins_ops {
void (*free)(struct ins_operands *ops);
- int (*parse)(struct ins_operands *ops, struct map *map);
+ int (*parse)(struct arch *arch, struct ins_operands *ops, struct map *map);
int (*scnprintf)(struct ins *ins, char *bf, size_t size,
struct ins_operands *ops);
};
-struct ins {
- const char *name;
- struct ins_ops *ops;
-};
-
bool ins__is_jump(const struct ins *ins);
bool ins__is_call(const struct ins *ins);
bool ins__is_ret(const struct ins *ins);
@@ -57,8 +59,7 @@ struct disasm_line {
struct list_head node;
s64 offset;
char *line;
- char *name;
- struct ins *ins;
+ struct ins ins;
int line_nr;
float ipc;
u64 cycles;
@@ -156,7 +157,7 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr);
int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
-int symbol__disassemble(struct symbol *sym, struct map *map, size_t privsize);
+int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize);
enum symbol_disassemble_errno {
SYMBOL_ANNOTATE_ERRNO__SUCCESS = 0,
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 2b2c9b82f5ab..36c861103291 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -14,11 +14,11 @@
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
-#include "llvm-utils.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "llvm-utils.h"
+#include "c++/clang-c.h"
#define DEFINE_PRINT_FN(name, level) \
static int libbpf_##name(const char *fmt, ...) \
@@ -86,10 +86,21 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
void *obj_buf;
size_t obj_buf_sz;
- err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
- if (err)
- return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
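+ /* Try the builtin clang first and fall back to the external compiler below. */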
+ perf_clang__init();
+ err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
+ perf_clang__cleanup();
+ if (err) {
+ pr_warning("bpf: builtin compilation failed: %d, try external compiler\n", err);
+ err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
+ if (err)
+ return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
+ } else
+ pr_debug("bpf: successfull builtin compilation\n");
obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
+
+ if (!IS_ERR(obj) && llvm_param.dump_obj)
+ llvm__dump_obj(filename, obj_buf, obj_buf_sz);
+
free(obj_buf);
} else
obj = bpf_object__open(filename);
@@ -241,7 +252,7 @@ parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
int err = 0;
if (!text) {
- pr_debug("No enough memory: dup config_str failed\n");
+ pr_debug("Not enough memory: dup config_str failed\n");
return ERR_PTR(-ENOMEM);
}
@@ -531,7 +542,7 @@ static int map_prologue(struct perf_probe_event *pev, int *mapping,
ptevs = malloc(array_sz);
if (!ptevs) {
- pr_debug("No enough memory: alloc ptevs failed\n");
+ pr_debug("Not enough memory: alloc ptevs failed\n");
return -ENOMEM;
}
@@ -604,13 +615,13 @@ static int hook_load_preprocessor(struct bpf_program *prog)
priv->need_prologue = true;
priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
if (!priv->insns_buf) {
- pr_debug("No enough memory: alloc insns_buf failed\n");
+ pr_debug("Not enough memory: alloc insns_buf failed\n");
return -ENOMEM;
}
priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
if (!priv->type_mapping) {
- pr_debug("No enough memory: alloc type_mapping failed\n");
+ pr_debug("Not enough memory: alloc type_mapping failed\n");
return -ENOMEM;
}
memset(priv->type_mapping, -1,
@@ -864,7 +875,7 @@ bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
op->k.array.ranges = memdup(term->array.ranges, memsz);
if (!op->k.array.ranges) {
- pr_debug("No enough memory to alloc indices for map\n");
+ pr_debug("Not enough memory to alloc indices for map\n");
return -ENOMEM;
}
op->key_type = BPF_MAP_KEY_RANGES;
@@ -929,7 +940,7 @@ bpf_map_priv__clone(struct bpf_map_priv *priv)
newpriv = zalloc(sizeof(*newpriv));
if (!newpriv) {
- pr_debug("No enough memory to alloc map private\n");
+ pr_debug("Not enough memory to alloc map private\n");
return NULL;
}
INIT_LIST_HEAD(&newpriv->ops_list);
@@ -960,7 +971,7 @@ bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
if (!priv) {
priv = zalloc(sizeof(*priv));
if (!priv) {
- pr_debug("No enough memory to alloc map private\n");
+ pr_debug("Not enough memory to alloc map private\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&priv->ops_list);
diff --git a/tools/perf/util/c++/Build b/tools/perf/util/c++/Build
new file mode 100644
index 000000000000..988fef1b11d7
--- /dev/null
+++ b/tools/perf/util/c++/Build
@@ -0,0 +1,2 @@
+libperf-$(CONFIG_CLANGLLVM) += clang.o
+libperf-$(CONFIG_CLANGLLVM) += clang-test.o
diff --git a/tools/perf/util/c++/clang-c.h b/tools/perf/util/c++/clang-c.h
new file mode 100644
index 000000000000..0eadd792ab1f
--- /dev/null
+++ b/tools/perf/util/c++/clang-c.h
@@ -0,0 +1,43 @@
+#ifndef PERF_UTIL_CLANG_C_H
+#define PERF_UTIL_CLANG_C_H
+
+#include <errno.h> /* for ENOTSUP in the stub perf_clang__compile_bpf() */
+#include <stddef.h> /* for size_t */
+#include <util-cxx.h> /* for __maybe_unused */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef HAVE_LIBCLANGLLVM_SUPPORT
+extern void perf_clang__init(void);
+extern void perf_clang__cleanup(void);
+
+extern int test__clang_to_IR(void);
+extern int test__clang_to_obj(void);
+
+extern int perf_clang__compile_bpf(const char *filename,
+ void **p_obj_buf,
+ size_t *p_obj_buf_sz);
+#else
+
+
+static inline void perf_clang__init(void) { }
+static inline void perf_clang__cleanup(void) { }
+
+static inline int test__clang_to_IR(void) { return -1; }
+static inline int test__clang_to_obj(void) { return -1;}
+
+static inline int
+perf_clang__compile_bpf(const char *filename __maybe_unused,
+ void **p_obj_buf __maybe_unused,
+ size_t *p_obj_buf_sz __maybe_unused)
+{
+ return -ENOTSUP;
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/tools/perf/util/c++/clang-test.cpp b/tools/perf/util/c++/clang-test.cpp
new file mode 100644
index 000000000000..9b11e8c82798
--- /dev/null
+++ b/tools/perf/util/c++/clang-test.cpp
@@ -0,0 +1,62 @@
+#include "clang.h"
+#include "clang-c.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LLVMContext.h"
+
+#include <util-cxx.h>
+#include <tests/llvm.h>
+#include <string>
+
+class perf_clang_scope {
+public:
+ explicit perf_clang_scope() {perf_clang__init();}
+ ~perf_clang_scope() {perf_clang__cleanup();}
+};
+
+static std::unique_ptr<llvm::Module>
+__test__clang_to_IR(void)
+{
+ unsigned int kernel_version;
+
+ if (fetch_kernel_version(&kernel_version, NULL, 0))
+ return std::unique_ptr<llvm::Module>(nullptr);
+
+ std::string cflag_kver("-DLINUX_VERSION_CODE=" +
+ std::to_string(kernel_version));
+
+ std::unique_ptr<llvm::Module> M =
+ perf::getModuleFromSource({cflag_kver.c_str()},
+ "perf-test.c",
+ test_llvm__bpf_base_prog);
+ return M;
+}
+
+extern "C" {
+int test__clang_to_IR(void)
+{
+ perf_clang_scope _scope;
+
+ auto M = __test__clang_to_IR();
+ if (!M)
+ return -1;
+ for (llvm::Function& F : *M)
+ if (F.getName() == "bpf_func__SyS_epoll_wait")
+ return 0;
+ return -1;
+}
+
+int test__clang_to_obj(void)
+{
+ perf_clang_scope _scope;
+
+ auto M = __test__clang_to_IR();
+ if (!M)
+ return -1;
+
+ auto Buffer = perf::getBPFObjectFromModule(&*M);
+ if (!Buffer)
+ return -1;
+ return 0;
+}
+
+}
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
new file mode 100644
index 000000000000..1e974152cac2
--- /dev/null
+++ b/tools/perf/util/c++/clang.cpp
@@ -0,0 +1,195 @@
+/*
+ * llvm C frontend for perf. Supports dynamically compiling C files.
+ *
+ * Inspired by clang example code:
+ * http://llvm.org/svn/llvm-project/cfe/trunk/examples/clang-interpreter/main.cpp
+ *
+ * Copyright (C) 2016 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2016 Huawei Inc.
+ */
+
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include <memory>
+
+#include "clang.h"
+#include "clang-c.h"
+
+namespace perf {
+
+static std::unique_ptr<llvm::LLVMContext> LLVMCtx;
+
+using namespace clang;
+
+static CompilerInvocation *
+createCompilerInvocation(llvm::opt::ArgStringList CFlags, StringRef& Path,
+ DiagnosticsEngine& Diags)
+{
+ llvm::opt::ArgStringList CCArgs {
+ "-cc1",
+ "-triple", "bpf-pc-linux",
+ "-fsyntax-only",
+ "-ferror-limit", "19",
+ "-fmessage-length", "127",
+ "-O2",
+ "-nostdsysteminc",
+ "-nobuiltininc",
+ "-vectorize-loops",
+ "-vectorize-slp",
+ "-Wno-unused-value",
+ "-Wno-pointer-sign",
+ "-x", "c"};
+
+ CCArgs.append(CFlags.begin(), CFlags.end());
+ CompilerInvocation *CI = tooling::newInvocation(&Diags, CCArgs);
+
+ FrontendOptions& Opts = CI->getFrontendOpts();
+ Opts.Inputs.clear();
+ Opts.Inputs.emplace_back(Path, IK_C);
+ return CI;
+}
+
+static std::unique_ptr<llvm::Module>
+getModuleFromSource(llvm::opt::ArgStringList CFlags,
+ StringRef Path, IntrusiveRefCntPtr<vfs::FileSystem> VFS)
+{
+ CompilerInstance Clang;
+ Clang.createDiagnostics();
+
+ Clang.setVirtualFileSystem(&*VFS);
+
+ IntrusiveRefCntPtr<CompilerInvocation> CI =
+ createCompilerInvocation(std::move(CFlags), Path,
+ Clang.getDiagnostics());
+ Clang.setInvocation(&*CI);
+
+ std::unique_ptr<CodeGenAction> Act(new EmitLLVMOnlyAction(&*LLVMCtx));
+ if (!Clang.ExecuteAction(*Act))
+ return std::unique_ptr<llvm::Module>(nullptr);
+
+ return Act->takeModule();
+}
+
+std::unique_ptr<llvm::Module>
+getModuleFromSource(llvm::opt::ArgStringList CFlags,
+ StringRef Name, StringRef Content)
+{
+ using namespace vfs;
+
+ llvm::IntrusiveRefCntPtr<OverlayFileSystem> OverlayFS(
+ new OverlayFileSystem(getRealFileSystem()));
+ llvm::IntrusiveRefCntPtr<InMemoryFileSystem> MemFS(
+ new InMemoryFileSystem(true));
+
+ /*
+ * pushOverlay() helps set the working dir for MemFS. It must be
+ * called before addFile().
+ */
+ OverlayFS->pushOverlay(MemFS);
+ MemFS->addFile(Twine(Name), 0, llvm::MemoryBuffer::getMemBuffer(Content));
+
+ return getModuleFromSource(std::move(CFlags), Name, OverlayFS);
+}
+
+std::unique_ptr<llvm::Module>
+getModuleFromSource(llvm::opt::ArgStringList CFlags, StringRef Path)
+{
+ IntrusiveRefCntPtr<vfs::FileSystem> VFS(vfs::getRealFileSystem());
+ return getModuleFromSource(std::move(CFlags), Path, VFS);
+}
+
+std::unique_ptr<llvm::SmallVectorImpl<char>>
+getBPFObjectFromModule(llvm::Module *Module)
+{
+ using namespace llvm;
+
+ std::string TargetTriple("bpf-pc-linux");
+ std::string Error;
+ const Target* Target = TargetRegistry::lookupTarget(TargetTriple, Error);
+ if (!Target) {
+ llvm::errs() << Error;
+ return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
+ }
+
+ llvm::TargetOptions Opt;
+ TargetMachine *TargetMachine =
+ Target->createTargetMachine(TargetTriple,
+ "generic", "",
+ Opt, Reloc::Static);
+
+ Module->setDataLayout(TargetMachine->createDataLayout());
+ Module->setTargetTriple(TargetTriple);
+
+ std::unique_ptr<SmallVectorImpl<char>> Buffer(new SmallVector<char, 0>());
+ raw_svector_ostream ostream(*Buffer);
+
+ legacy::PassManager PM;
+ if (TargetMachine->addPassesToEmitFile(PM, ostream,
+ TargetMachine::CGFT_ObjectFile)) {
+ llvm::errs() << "TargetMachine can't emit a file of this type\n";
+ return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
+ }
+ PM.run(*Module);
+
+ return std::move(Buffer);
+}
+
+}
+
+extern "C" {
+void perf_clang__init(void)
+{
+ perf::LLVMCtx.reset(new llvm::LLVMContext());
+ LLVMInitializeBPFTargetInfo();
+ LLVMInitializeBPFTarget();
+ LLVMInitializeBPFTargetMC();
+ LLVMInitializeBPFAsmPrinter();
+}
+
+void perf_clang__cleanup(void)
+{
+ perf::LLVMCtx.reset(nullptr);
+ llvm::llvm_shutdown();
+}
+
+int perf_clang__compile_bpf(const char *filename,
+ void **p_obj_buf,
+ size_t *p_obj_buf_sz)
+{
+ using namespace perf;
+
+ if (!p_obj_buf || !p_obj_buf_sz)
+ return -EINVAL;
+
+ llvm::opt::ArgStringList CFlags;
+ auto M = getModuleFromSource(std::move(CFlags), filename);
+ if (!M)
+ return -EINVAL;
+ auto O = getBPFObjectFromModule(&*M);
+ if (!O)
+ return -EINVAL;
+
+ size_t size = O->size_in_bytes();
+ void *buffer;
+
+ buffer = malloc(size);
+ if (!buffer)
+ return -ENOMEM;
+ memcpy(buffer, O->data(), size);
+ *p_obj_buf = buffer;
+ *p_obj_buf_sz = size;
+ return 0;
+}
+}
diff --git a/tools/perf/util/c++/clang.h b/tools/perf/util/c++/clang.h
new file mode 100644
index 000000000000..dd8b0427550d
--- /dev/null
+++ b/tools/perf/util/c++/clang.h
@@ -0,0 +1,26 @@
+#ifndef PERF_UTIL_CLANG_H
+#define PERF_UTIL_CLANG_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Option/Option.h"
+#include <memory>
+
+namespace perf {
+
+using namespace llvm;
+
+std::unique_ptr<Module>
+getModuleFromSource(opt::ArgStringList CFlags,
+ StringRef Name, StringRef Content);
+
+std::unique_ptr<Module>
+getModuleFromSource(opt::ArgStringList CFlags,
+ StringRef Path);
+
+std::unique_ptr<llvm::SmallVectorImpl<char>>
+getBPFObjectFromModule(llvm::Module *Module);
+
+}
+#endif
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 07fd30bc2f81..42922512c1c6 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -193,7 +193,6 @@ int perf_callchain_config(const char *var, const char *value)
if (!strcmp(var, "record-mode"))
return parse_callchain_record_opt(value, &callchain_param);
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
if (!strcmp(var, "dump-size")) {
unsigned long size = 0;
int ret;
@@ -203,7 +202,6 @@ int perf_callchain_config(const char *var, const char *value)
return ret;
}
-#endif
if (!strcmp(var, "print-type"))
return parse_callchain_mode(value);
if (!strcmp(var, "order"))
@@ -440,6 +438,21 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
call->ip = cursor_node->ip;
call->ms.sym = cursor_node->sym;
call->ms.map = cursor_node->map;
+
+ if (cursor_node->branch) {
+ call->branch_count = 1;
+
+ if (cursor_node->branch_flags.predicted)
+ call->predicted_count = 1;
+
+ if (cursor_node->branch_flags.abort)
+ call->abort_count = 1;
+
+ call->cycles_count = cursor_node->branch_flags.cycles;
+ call->iter_count = cursor_node->nr_loop_iter;
+ call->samples_count = cursor_node->samples;
+ }
+
list_add_tail(&call->list, &node->val);
callchain_cursor_advance(cursor);
@@ -499,8 +512,23 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
right = node->ip;
}
- if (left == right)
+ if (left == right) {
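+ /* Same address: fold this node's branch stats into the matching entry. */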
+ if (node->branch) {
+ cnode->branch_count++;
+
+ if (node->branch_flags.predicted)
+ cnode->predicted_count++;
+
+ if (node->branch_flags.abort)
+ cnode->abort_count++;
+
+ cnode->cycles_count += node->branch_flags.cycles;
+ cnode->iter_count += node->nr_loop_iter;
+ cnode->samples_count += node->samples;
+ }
+
return MATCH_EQ;
+ }
return left > right ? MATCH_GT : MATCH_LT;
}
@@ -730,7 +758,8 @@ merge_chain_branch(struct callchain_cursor *cursor,
list_for_each_entry_safe(list, next_list, &src->val, list) {
callchain_cursor_append(cursor, list->ip,
- list->ms.map, list->ms.sym);
+ list->ms.map, list->ms.sym,
+ false, NULL, 0, 0);
list_del(&list->list);
free(list);
}
@@ -767,7 +796,9 @@ int callchain_merge(struct callchain_cursor *cursor,
}
int callchain_cursor_append(struct callchain_cursor *cursor,
- u64 ip, struct map *map, struct symbol *sym)
+ u64 ip, struct map *map, struct symbol *sym,
+ bool branch, struct branch_flags *flags,
+ int nr_loop_iter, int samples)
{
struct callchain_cursor_node *node = *cursor->last;
@@ -782,6 +813,13 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
node->ip = ip;
node->map = map;
node->sym = sym;
+ node->branch = branch;
+ node->nr_loop_iter = nr_loop_iter;
+ node->samples = samples;
+
+ if (flags)
+ memcpy(&node->branch_flags, flags,
+ sizeof(struct branch_flags));
cursor->nr++;
@@ -939,6 +977,163 @@ int callchain_node__fprintf_value(struct callchain_node *node,
return 0;
}
+static void callchain_counts_value(struct callchain_node *node,
+ u64 *branch_count, u64 *predicted_count,
+ u64 *abort_count, u64 *cycles_count)
+{
+ struct callchain_list *clist;
+
+ list_for_each_entry(clist, &node->val, list) {
+ if (branch_count)
+ *branch_count += clist->branch_count;
+
+ if (predicted_count)
+ *predicted_count += clist->predicted_count;
+
+ if (abort_count)
+ *abort_count += clist->abort_count;
+
+ if (cycles_count)
+ *cycles_count += clist->cycles_count;
+ }
+}
+
+static int callchain_node_branch_counts_cumul(struct callchain_node *node,
+ u64 *branch_count,
+ u64 *predicted_count,
+ u64 *abort_count,
+ u64 *cycles_count)
+{
+ struct callchain_node *child;
+ struct rb_node *n;
+
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = rb_entry(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+
+ callchain_node_branch_counts_cumul(child, branch_count,
+ predicted_count,
+ abort_count,
+ cycles_count);
+
+ callchain_counts_value(child, branch_count,
+ predicted_count, abort_count,
+ cycles_count);
+ }
+
+ return 0;
+}
+
+int callchain_branch_counts(struct callchain_root *root,
+ u64 *branch_count, u64 *predicted_count,
+ u64 *abort_count, u64 *cycles_count)
+{
+ if (branch_count)
+ *branch_count = 0;
+
+ if (predicted_count)
+ *predicted_count = 0;
+
+ if (abort_count)
+ *abort_count = 0;
+
+ if (cycles_count)
+ *cycles_count = 0;
+
+ return callchain_node_branch_counts_cumul(&root->node,
+ branch_count,
+ predicted_count,
+ abort_count,
+ cycles_count);
+}
+
+static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
+ u64 branch_count, u64 predicted_count,
+ u64 abort_count, u64 cycles_count,
+ u64 iter_count, u64 samples_count)
+{
+ double predicted_percent = 0.0;
+ const char *null_str = "";
+ char iter_str[32];
+ char *str;
+ u64 cycles = 0;
+
+ if (branch_count == 0) {
+ if (fp)
+ return fprintf(fp, " (calltrace)");
+
+ return scnprintf(bf, bfsize, " (calltrace)");
+ }
+
+ if (iter_count && samples_count) {
+ scnprintf(iter_str, sizeof(iter_str),
+ ", iterations:%" PRId64 "",
+ iter_count / samples_count);
+ str = iter_str;
+ } else
+ str = (char *)null_str;
+
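+ /* Average the prediction rate and cycles over all branches seen at this entry. */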
+ predicted_percent = predicted_count * 100.0 / branch_count;
+ cycles = cycles_count / branch_count;
+
+ if ((predicted_percent >= 100.0) && (abort_count == 0)) {
+ if (fp)
+ return fprintf(fp, " (cycles:%" PRId64 "%s)",
+ cycles, str);
+
+ return scnprintf(bf, bfsize, " (cycles:%" PRId64 "%s)",
+ cycles, str);
+ }
+
+ if ((predicted_percent < 100.0) && (abort_count == 0)) {
+ if (fp)
+ return fprintf(fp,
+ " (predicted:%.1f%%, cycles:%" PRId64 "%s)",
+ predicted_percent, cycles, str);
+
+ return scnprintf(bf, bfsize,
+ " (predicted:%.1f%%, cycles:%" PRId64 "%s)",
+ predicted_percent, cycles, str);
+ }
+
+ if (fp)
+ return fprintf(fp,
+ " (predicted:%.1f%%, abort:%" PRId64 ", cycles:%" PRId64 "%s)",
+ predicted_percent, abort_count, cycles, str);
+
+ return scnprintf(bf, bfsize,
+ " (predicted:%.1f%%, abort:%" PRId64 ", cycles:%" PRId64 "%s)",
+ predicted_percent, abort_count, cycles, str);
+}
+
+int callchain_list_counts__printf_value(struct callchain_node *node,
+ struct callchain_list *clist,
+ FILE *fp, char *bf, int bfsize)
+{
+ u64 branch_count, predicted_count;
+ u64 abort_count, cycles_count;
+ u64 iter_count = 0, samples_count = 0;
+
+ branch_count = clist->branch_count;
+ predicted_count = clist->predicted_count;
+ abort_count = clist->abort_count;
+ cycles_count = clist->cycles_count;
+
+ if (node) {
+ struct callchain_list *call;
+
+ list_for_each_entry(call, &node->val, list) {
+ iter_count += call->iter_count;
+ samples_count += call->samples_count;
+ }
+ }
+
+ return callchain_counts_printf(fp, bf, bfsize, branch_count,
+ predicted_count, abort_count,
+ cycles_count, iter_count, samples_count);
+}
+
static void free_callchain_node(struct callchain_node *node)
{
struct callchain_list *list, *tmp;
@@ -1039,3 +1234,30 @@ out:
}
return -ENOMEM;
}
+
+int callchain_cursor__copy(struct callchain_cursor *dst,
+ struct callchain_cursor *src)
+{
+ int rc = 0;
+
+ callchain_cursor_reset(dst);
+ callchain_cursor_commit(src);
+
+ while (true) {
+ struct callchain_cursor_node *node;
+
+ node = callchain_cursor_current(src);
+ if (node == NULL)
+ break;
+
+ rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
+ node->branch, &node->branch_flags,
+ node->nr_loop_iter, node->samples);
+ if (rc)
+ break;
+
+ callchain_cursor_advance(src);
+ }
+
+ return rc;
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 13e75549c440..35c8e379530f 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -11,11 +11,7 @@
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace):\n\n"
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
# define RECORD_MODE_HELP HELP_PAD "record_mode:\tcall graph recording mode (fp|dwarf|lbr)\n"
-#else
-# define RECORD_MODE_HELP HELP_PAD "record_mode:\tcall graph recording mode (fp|lbr)\n"
-#endif
#define RECORD_SIZE_HELP \
HELP_PAD "record_size:\tif record_mode is 'dwarf', max size of stack recording (<bytes>)\n" \
@@ -115,6 +111,12 @@ struct callchain_list {
bool unfolded;
bool has_children;
};
+ u64 branch_count;
+ u64 predicted_count;
+ u64 abort_count;
+ u64 cycles_count;
+ u64 iter_count;
+ u64 samples_count;
char *srcline;
struct list_head list;
};
@@ -129,6 +131,10 @@ struct callchain_cursor_node {
u64 ip;
struct map *map;
struct symbol *sym;
+ bool branch;
+ struct branch_flags branch_flags;
+ int nr_loop_iter;
+ int samples;
struct callchain_cursor_node *next;
};
@@ -183,7 +189,9 @@ static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
}
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,
- struct map *map, struct symbol *sym);
+ struct map *map, struct symbol *sym,
+ bool branch, struct branch_flags *flags,
+ int nr_loop_iter, int samples);
/* Close a cursor writing session. Initialize for the reader */
static inline void callchain_cursor_commit(struct callchain_cursor *cursor)
@@ -208,6 +216,9 @@ static inline void callchain_cursor_advance(struct callchain_cursor *cursor)
cursor->pos++;
}
+int callchain_cursor__copy(struct callchain_cursor *dst,
+ struct callchain_cursor *src);
+
struct option;
struct hist_entry;
@@ -261,8 +272,16 @@ char *callchain_node__scnprintf_value(struct callchain_node *node,
int callchain_node__fprintf_value(struct callchain_node *node,
FILE *fp, u64 total);
+int callchain_list_counts__printf_value(struct callchain_node *node,
+ struct callchain_list *clist,
+ FILE *fp, char *bf, int bfsize);
+
void free_callchain(struct callchain_root *root);
void decay_callchain(struct callchain_root *root);
int callchain_node__make_parent_list(struct callchain_node *node);
+int callchain_branch_counts(struct callchain_root *root,
+ u64 *branch_count, u64 *predicted_count,
+ u64 *abort_count, u64 *cycles_count);
+
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 18dae745034f..3d906dbbef74 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -594,6 +594,19 @@ static int collect_config(const char *var, const char *value,
goto out_free;
}
+ /* perf_config_set can contain both user and system config items.
+ * So we should know where each value came from.
+ * The classification is needed when a particular config file
+ * is overwritten by the config setting feature, i.e. set_config().
+ */
+ if (strcmp(config_file_name, perf_etc_perfconfig()) == 0) {
+ section->from_system_config = true;
+ item->from_system_config = true;
+ } else {
+ section->from_system_config = false;
+ item->from_system_config = false;
+ }
+
ret = set_value(item, value);
return ret;
@@ -602,6 +615,13 @@ out_free:
return -1;
}
+int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
+ const char *var, const char *value)
+{
+ config_file_name = file_name;
+ return collect_config(var, value, set);
+}
+
static int perf_config_set__init(struct perf_config_set *set)
{
int ret = -1;
diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h
index 6f813d46045e..1a59a6b43f8b 100644
--- a/tools/perf/util/config.h
+++ b/tools/perf/util/config.h
@@ -7,12 +7,14 @@
struct perf_config_item {
char *name;
char *value;
+ bool from_system_config;
struct list_head node;
};
struct perf_config_section {
char *name;
struct list_head items;
+ bool from_system_config;
struct list_head node;
};
@@ -33,6 +35,8 @@ const char *perf_etc_perfconfig(void);
struct perf_config_set *perf_config_set__new(void);
void perf_config_set__delete(struct perf_config_set *set);
+int perf_config_set__collect(struct perf_config_set *set, const char *file_name,
+ const char *var, const char *value);
void perf_config__init(void);
void perf_config__exit(void);
void perf_config__refresh(void);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 8d363d5e65a2..c735c53a26f8 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -177,6 +177,8 @@ enum {
PERF_IP_FLAG_TRACE_BEGIN |\
PERF_IP_FLAG_TRACE_END)
+#define MAX_INSN 16
+
struct perf_sample {
u64 ip;
u32 pid, tid;
@@ -193,6 +195,7 @@ struct perf_sample {
u32 flags;
u16 insn_len;
u8 cpumode;
+ char insn[MAX_INSN];
void *raw_data;
struct ip_callchain *callchain;
struct branch_stack *branch_stack;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 8bc271141d9d..b2365a63db45 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -28,6 +28,7 @@
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
+#include "util/parse-branch-options.h"
static struct {
bool sample_id_all;
@@ -708,6 +709,14 @@ static void apply_config_terms(struct perf_evsel *evsel,
case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
callgraph_buf = term->val.callgraph;
break;
+ case PERF_EVSEL__CONFIG_TERM_BRANCH:
+ if (term->val.branch && strcmp(term->val.branch, "no")) {
+ perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
+ parse_branch_str(term->val.branch,
+ &attr->branch_sample_type);
+ } else
+ perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
+ break;
case PERF_EVSEL__CONFIG_TERM_STACK_USER:
dump_size = term->val.stack_user;
break;
@@ -1472,7 +1481,7 @@ retry_sample_id:
group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
- pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
+ pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
pid, cpus->map[cpu], group_fd, flags);
FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
@@ -1481,11 +1490,13 @@ retry_open:
group_fd, flags);
if (FD(evsel, cpu, thread) < 0) {
err = -errno;
- pr_debug2("sys_perf_event_open failed, error %d\n",
+ pr_debug2("\nsys_perf_event_open failed, error %d\n",
err);
goto try_fallback;
}
+ pr_debug2(" = %d\n", FD(evsel, cpu, thread));
+
if (evsel->bpf_fd >= 0) {
int evt_fd = FD(evsel, cpu, thread);
int bpf_fd = evsel->bpf_fd;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index b1503b0ecdff..6abb89cd27f9 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -47,6 +47,7 @@ enum {
PERF_EVSEL__CONFIG_TERM_MAX_STACK,
PERF_EVSEL__CONFIG_TERM_OVERWRITE,
PERF_EVSEL__CONFIG_TERM_DRV_CFG,
+ PERF_EVSEL__CONFIG_TERM_BRANCH,
PERF_EVSEL__CONFIG_TERM_MAX,
};
@@ -63,6 +64,7 @@ struct perf_evsel_config_term {
int max_stack;
bool inherit;
bool overwrite;
+ char *branch;
} val;
};
@@ -389,6 +391,8 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
#define EVSEL__PRINT_ONELINE (1<<4)
#define EVSEL__PRINT_SRCLINE (1<<5)
#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
+#define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7)
+#define EVSEL__PRINT_SKIP_IGNORED (1<<8)
struct callchain_cursor;
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 662a0a6182e7..6b2925542c0a 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -108,7 +108,10 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
int print_oneline = print_opts & EVSEL__PRINT_ONELINE;
int print_srcline = print_opts & EVSEL__PRINT_SRCLINE;
int print_unknown_as_addr = print_opts & EVSEL__PRINT_UNKNOWN_AS_ADDR;
+ int print_arrow = print_opts & EVSEL__PRINT_CALLCHAIN_ARROW;
+ int print_skip_ignored = print_opts & EVSEL__PRINT_SKIP_IGNORED;
char s = print_oneline ? ' ' : '\t';
+ bool first = true;
if (sample->callchain) {
struct addr_location node_al;
@@ -122,8 +125,14 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
if (!node)
break;
+ if (node->sym && node->sym->ignore && print_skip_ignored)
+ goto next;
+
printed += fprintf(fp, "%-*.*s", left_alignment, left_alignment, " ");
+ if (print_arrow && !first)
+ printed += fprintf(fp, " <-");
+
if (print_ip)
printed += fprintf(fp, "%c%16" PRIx64, s, node->ip);
@@ -137,7 +146,8 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
if (print_symoffset) {
printed += __symbol__fprintf_symname_offs(node->sym, &node_al,
- print_unknown_as_addr, fp);
+ print_unknown_as_addr,
+ true, fp);
} else {
printed += __symbol__fprintf_symname(node->sym, &node_al,
print_unknown_as_addr, fp);
@@ -156,6 +166,16 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
if (!print_oneline)
printed += fprintf(fp, "\n");
+ if (symbol_conf.bt_stop_list &&
+ node->sym &&
+ node->sym->name &&
+ strlist__has_entry(symbol_conf.bt_stop_list,
+ node->sym->name)) {
+ break;
+ }
+
+ first = false;
+next:
callchain_cursor_advance(cursor);
}
}
@@ -188,7 +208,8 @@ int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
printed += fprintf(fp, " ");
if (print_symoffset) {
printed += __symbol__fprintf_symname_offs(al->sym, al,
- print_unknown_as_addr, fp);
+ print_unknown_as_addr,
+ true, fp);
} else {
printed += __symbol__fprintf_symname(al->sym, al,
print_unknown_as_addr, fp);
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index c1ef805c6a8f..c540d47583e7 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -19,12 +19,18 @@
#include <limits.h>
#include <fcntl.h>
#include <err.h>
+#ifdef HAVE_DWARF_SUPPORT
#include <dwarf.h>
+#endif
#include "perf.h"
#include "genelf.h"
#include "../util/jitdump.h"
+#ifndef NT_GNU_BUILD_ID
+#define NT_GNU_BUILD_ID 3
+#endif
+
#define JVMTI
#define BUILD_ID_URANDOM /* different uuid for each run */
@@ -67,6 +73,8 @@ static char shd_string_table[] = {
'.', 'd', 'e', 'b', 'u', 'g', '_', 'l', 'i', 'n', 'e', 0, /* 52 */
'.', 'd', 'e', 'b', 'u', 'g', '_', 'i', 'n', 'f', 'o', 0, /* 64 */
'.', 'd', 'e', 'b', 'u', 'g', '_', 'a', 'b', 'b', 'r', 'e', 'v', 0, /* 76 */
+ '.', 'e', 'h', '_', 'f', 'r', 'a', 'm', 'e', '_', 'h', 'd', 'r', 0, /* 90 */
+ '.', 'e', 'h', '_', 'f', 'r', 'a', 'm', 'e', 0, /* 104 */
};
static struct buildid_note {
@@ -147,6 +155,86 @@ gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *cod
}
#endif
+static int
+jit_add_eh_frame_info(Elf *e, void* unwinding, uint64_t unwinding_header_size,
+ uint64_t unwinding_size, uint64_t base_offset)
+{
+ Elf_Data *d;
+ Elf_Scn *scn;
+ Elf_Shdr *shdr;
+ uint64_t unwinding_table_size = unwinding_size - unwinding_header_size;
+
+ /*
+ * setup eh_frame section
+ */
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ return -1;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ return -1;
+ }
+
+ d->d_align = 8;
+ d->d_off = 0LL;
+ d->d_buf = unwinding;
+ d->d_type = ELF_T_BYTE;
+ d->d_size = unwinding_table_size;
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ return -1;
+ }
+
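+ /* 104: offset of ".eh_frame" in shd_string_table */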
+ shdr->sh_name = 104;
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_addr = base_offset;
+ shdr->sh_flags = SHF_ALLOC;
+ shdr->sh_entsize = 0;
+
+ /*
+ * setup eh_frame_hdr section
+ */
+ scn = elf_newscn(e);
+ if (!scn) {
+ warnx("cannot create section");
+ return -1;
+ }
+
+ d = elf_newdata(scn);
+ if (!d) {
+ warnx("cannot get new data");
+ return -1;
+ }
+
+ d->d_align = 4;
+ d->d_off = 0LL;
+ d->d_buf = unwinding + unwinding_table_size;
+ d->d_type = ELF_T_BYTE;
+ d->d_size = unwinding_header_size;
+ d->d_version = EV_CURRENT;
+
+ shdr = elf_getshdr(scn);
+ if (!shdr) {
+ warnx("cannot get section header");
+ return -1;
+ }
+
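+ /* 90: offset of ".eh_frame_hdr" in shd_string_table */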
+ shdr->sh_name = 90;
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_addr = base_offset + unwinding_table_size;
+ shdr->sh_flags = SHF_ALLOC;
+ shdr->sh_entsize = 0;
+
+ return 0;
+}
+
/*
* fd: file descriptor open for writing for the output file
* load_addr: code load address (could be zero, just used for buildid)
@@ -157,13 +245,15 @@ gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *cod
int
jit_write_elf(int fd, uint64_t load_addr, const char *sym,
const void *code, int csize,
- void *debug, int nr_debug_entries)
+ void *debug __maybe_unused, int nr_debug_entries __maybe_unused,
+ void *unwinding, uint64_t unwinding_header_size, uint64_t unwinding_size)
{
Elf *e;
Elf_Data *d;
Elf_Scn *scn;
Elf_Ehdr *ehdr;
Elf_Shdr *shdr;
+ uint64_t eh_frame_base_offset;
char *strsym = NULL;
int symlen;
int retval = -1;
@@ -194,7 +284,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
ehdr->e_type = ET_DYN;
ehdr->e_entry = GEN_ELF_TEXT_OFFSET;
ehdr->e_version = EV_CURRENT;
- ehdr->e_shstrndx= 2; /* shdr index for section name */
+ ehdr->e_shstrndx= unwinding ? 4 : 2; /* shdr index for section name */
/*
* setup text section
@@ -231,6 +321,18 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
shdr->sh_entsize = 0;
/*
+ * Setup .eh_frame_hdr and .eh_frame
+ */
+ if (unwinding) {
+ eh_frame_base_offset = ALIGN_8(GEN_ELF_TEXT_OFFSET + csize);
+ retval = jit_add_eh_frame_info(e, unwinding,
+ unwinding_header_size, unwinding_size,
+ eh_frame_base_offset);
+ if (retval)
+ goto error;
+ }
+
+ /*
* setup section headers string table
*/
scn = elf_newscn(e);
@@ -298,7 +400,7 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
shdr->sh_type = SHT_SYMTAB;
shdr->sh_flags = 0;
shdr->sh_entsize = sizeof(Elf_Sym);
- shdr->sh_link = 4; /* index of .strtab section */
+ shdr->sh_link = unwinding ? 6 : 4; /* index of .strtab section */
/*
* setup symbols string table
@@ -386,11 +488,14 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym,
shdr->sh_size = sizeof(bnote);
shdr->sh_entsize = 0;
+#ifdef HAVE_DWARF_SUPPORT
if (debug && nr_debug_entries) {
retval = jit_add_debug_info(e, load_addr, debug, nr_debug_entries);
if (retval)
goto error;
- } else {
+ } else
+#endif
+ {
if (elf_update(e, ELF_C_WRITE) < 0) {
warnx("elf_update 4 failed");
goto error;
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index 2fbeb59c4bdd..2424bd9862a3 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -3,9 +3,12 @@
/* genelf.c */
int jit_write_elf(int fd, uint64_t code_addr, const char *sym,
- const void *code, int csize, void *debug, int nr_debug_entries);
+ const void *code, int csize, void *debug, int nr_debug_entries,
+ void *unwinding, uint64_t unwinding_header_size, uint64_t unwinding_size);
+#ifdef HAVE_DWARF_SUPPORT
/* genelf_debug.c */
int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_entries);
+#endif
#if defined(__arm__)
#define GEN_ELF_ARCH EM_ARM
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 2f3eded54b0c..d89c9c7ef4e5 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2250,11 +2250,28 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
struct header_print_data hd;
struct perf_header *header = &session->header;
int fd = perf_data_file__fd(session->file);
+ struct stat st;
+ int ret, bit;
+
hd.fp = fp;
hd.full = full;
+ ret = fstat(fd, &st);
+ if (ret == -1)
+ return -1;
+
+ fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
+
perf_header__process_sections(header, fd, &hd,
perf_file_section__fprintf_info);
+
+ fprintf(fp, "# missing features: ");
+ for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
+ if (bit)
+ fprintf(fp, "%s ", feat_ops[bit].name);
+ }
+
+ fprintf(fp, "\n");
return 0;
}
@@ -2273,7 +2290,7 @@ static int do_write_feat(int fd, struct perf_header *h, int type,
err = feat_ops[type].write(fd, h, evlist);
if (err < 0) {
- pr_debug("failed to write feature %d\n", type);
+ pr_debug("failed to write feature %s\n", feat_ops[type].name);
/* undo anything written */
lseek(fd, (*p)->offset, SEEK_SET);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index a69f027368ef..6770a9645609 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1195,6 +1195,7 @@ static void hist_entry__check_and_remove_filter(struct hist_entry *he,
case HIST_FILTER__GUEST:
case HIST_FILTER__HOST:
case HIST_FILTER__SOCKET:
+ case HIST_FILTER__C2C:
default:
return;
}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 9928fed8bc59..d4b6514eeef5 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -22,6 +22,7 @@ enum hist_filter {
HIST_FILTER__GUEST,
HIST_FILTER__HOST,
HIST_FILTER__SOCKET,
+ HIST_FILTER__C2C,
};
enum hist_column {
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index f545ec1e758a..6c2eb5da4afc 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -295,6 +295,7 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
sample.cpu = btsq->cpu;
sample.flags = btsq->sample_flags;
sample.insn_len = btsq->intel_pt_insn.length;
+ memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ);
if (bts->synth_opts.inject) {
event.sample.header.size = bts->branches_event_size;
@@ -319,15 +320,12 @@ static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
struct machine *machine = btsq->bts->machine;
struct thread *thread;
struct addr_location al;
- unsigned char buf[1024];
- size_t bufsz;
+ unsigned char buf[INTEL_PT_INSN_BUF_SZ];
ssize_t len;
int x86_64;
uint8_t cpumode;
int err = -1;
- bufsz = intel_pt_insn_max_size();
-
if (machine__kernel_ip(machine, ip))
cpumode = PERF_RECORD_MISC_KERNEL;
else
@@ -341,7 +339,8 @@ static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
if (!al.map || !al.map->dso)
goto out_put;
- len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf, bufsz);
+ len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf,
+ INTEL_PT_INSN_BUF_SZ);
if (len <= 0)
goto out_put;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 16c06d3ae577..e4e7dc781d21 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -980,6 +980,8 @@ out:
out_no_progress:
decoder->state.insn_op = intel_pt_insn->op;
decoder->state.insn_len = intel_pt_insn->length;
+ memcpy(decoder->state.insn, intel_pt_insn->buf,
+ INTEL_PT_INSN_BUF_SZ);
if (decoder->tx_flags & INTEL_PT_IN_TX)
decoder->state.flags |= INTEL_PT_IN_TX;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index 89399985fa4d..e90619a43c0c 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -66,6 +66,7 @@ struct intel_pt_state {
uint32_t flags;
enum intel_pt_insn_op insn_op;
int insn_len;
+ char insn[INTEL_PT_INSN_BUF_SZ];
};
struct intel_pt_insn;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
index d23138c06665..7913363bde5c 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -27,6 +27,10 @@
#include "intel-pt-insn-decoder.h"
+#if INTEL_PT_INSN_BUF_SZ < MAX_INSN_SIZE || INTEL_PT_INSN_BUF_SZ > MAX_INSN
+#error INTEL_PT_INSN_BUF_SZ must be at least MAX_INSN_SIZE and at most MAX_INSN
+#endif
+
/* Based on branch_type() from perf_event_intel_lbr.c */
static void intel_pt_insn_decoder(struct insn *insn,
struct intel_pt_insn *intel_pt_insn)
@@ -166,10 +170,10 @@ int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64,
if (!insn_complete(&insn) || insn.length > len)
return -1;
intel_pt_insn_decoder(&insn, intel_pt_insn);
- if (insn.length < INTEL_PT_INSN_DBG_BUF_SZ)
+ if (insn.length < INTEL_PT_INSN_BUF_SZ)
memcpy(intel_pt_insn->buf, buf, insn.length);
else
- memcpy(intel_pt_insn->buf, buf, INTEL_PT_INSN_DBG_BUF_SZ);
+ memcpy(intel_pt_insn->buf, buf, INTEL_PT_INSN_BUF_SZ);
return 0;
}
@@ -211,11 +215,6 @@ int intel_pt_insn_desc(const struct intel_pt_insn *intel_pt_insn, char *buf,
return 0;
}
-size_t intel_pt_insn_max_size(void)
-{
- return MAX_INSN_SIZE;
-}
-
int intel_pt_insn_type(enum intel_pt_insn_op op)
{
switch (op) {
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
index b0adbf37323e..37ec5627ae9b 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
@@ -20,7 +20,7 @@
#include <stdint.h>
#define INTEL_PT_INSN_DESC_MAX 32
-#define INTEL_PT_INSN_DBG_BUF_SZ 16
+#define INTEL_PT_INSN_BUF_SZ 16
enum intel_pt_insn_op {
INTEL_PT_OP_OTHER,
@@ -47,7 +47,7 @@ struct intel_pt_insn {
enum intel_pt_insn_branch branch;
int length;
int32_t rel;
- unsigned char buf[INTEL_PT_INSN_DBG_BUF_SZ];
+ unsigned char buf[INTEL_PT_INSN_BUF_SZ];
};
int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64,
@@ -58,8 +58,6 @@ const char *intel_pt_insn_name(enum intel_pt_insn_op op);
int intel_pt_insn_desc(const struct intel_pt_insn *intel_pt_insn, char *buf,
size_t buf_len);
-size_t intel_pt_insn_max_size(void);
-
int intel_pt_insn_type(enum intel_pt_insn_op op);
#endif
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
index 319bef33a64b..e02bc7b166a0 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-log.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
@@ -119,8 +119,8 @@ void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
if (intel_pt_log_open())
return;
- if (len > INTEL_PT_INSN_DBG_BUF_SZ)
- len = INTEL_PT_INSN_DBG_BUF_SZ;
+ if (len > INTEL_PT_INSN_BUF_SZ)
+ len = INTEL_PT_INSN_BUF_SZ;
intel_pt_print_data(intel_pt_insn->buf, len, ip, 8);
if (intel_pt_insn_desc(intel_pt_insn, desc, INTEL_PT_INSN_DESC_MAX) > 0)
fprintf(f, "%s\n", desc);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index dc041d4368c8..85d5eeb66c75 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -143,6 +143,7 @@ struct intel_pt_queue {
u32 flags;
u16 insn_len;
u64 last_insn_cnt;
+ char insn[INTEL_PT_INSN_BUF_SZ];
};
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
@@ -315,6 +316,7 @@ struct intel_pt_cache_entry {
enum intel_pt_insn_branch branch;
int length;
int32_t rel;
+ char insn[INTEL_PT_INSN_BUF_SZ];
};
static int intel_pt_config_div(const char *var, const char *value, void *data)
@@ -400,6 +402,7 @@ static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
e->branch = intel_pt_insn->branch;
e->length = intel_pt_insn->length;
e->rel = intel_pt_insn->rel;
+ memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);
err = auxtrace_cache__add(c, offset, &e->entry);
if (err)
@@ -428,8 +431,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
struct machine *machine = ptq->pt->machine;
struct thread *thread;
struct addr_location al;
- unsigned char buf[1024];
- size_t bufsz;
+ unsigned char buf[INTEL_PT_INSN_BUF_SZ];
ssize_t len;
int x86_64;
u8 cpumode;
@@ -437,11 +439,11 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
u64 insn_cnt = 0;
bool one_map = true;
+ intel_pt_insn->length = 0;
+
if (to_ip && *ip == to_ip)
goto out_no_cache;
- bufsz = intel_pt_insn_max_size();
-
if (*ip >= ptq->pt->kernel_start)
cpumode = PERF_RECORD_MISC_KERNEL;
else
@@ -478,6 +480,8 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
intel_pt_insn->branch = e->branch;
intel_pt_insn->length = e->length;
intel_pt_insn->rel = e->rel;
+ memcpy(intel_pt_insn->buf, e->insn,
+ INTEL_PT_INSN_BUF_SZ);
intel_pt_log_insn_no_data(intel_pt_insn, *ip);
return 0;
}
@@ -493,7 +497,8 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
while (1) {
len = dso__data_read_offset(al.map->dso, machine,
- offset, buf, bufsz);
+ offset, buf,
+ INTEL_PT_INSN_BUF_SZ);
if (len <= 0)
return -EINVAL;
@@ -900,6 +905,7 @@ static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
if (ptq->state->flags & INTEL_PT_IN_TX)
ptq->flags |= PERF_IP_FLAG_IN_TX;
ptq->insn_len = ptq->state->insn_len;
+ memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
}
}
@@ -1080,6 +1086,7 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
sample.cpu = ptq->cpu;
sample.flags = ptq->flags;
sample.insn_len = ptq->insn_len;
+ memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
/*
* perf report cannot handle events without a branch stack when using
@@ -1141,6 +1148,7 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
sample.cpu = ptq->cpu;
sample.flags = ptq->flags;
sample.insn_len = ptq->insn_len;
+ memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
@@ -1203,6 +1211,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
sample.cpu = ptq->cpu;
sample.flags = ptq->flags;
sample.insn_len = ptq->insn_len;
+ memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
if (pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->chain,
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 95f0884aae02..c9a941ef0f6d 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -37,6 +37,10 @@ struct jit_buf_desc {
bool needs_bswap; /* handles cross-endianess */
bool use_arch_timestamp;
void *debug_data;
+ void *unwinding_data;
+ uint64_t unwinding_size;
+ uint64_t unwinding_mapped_size;
+ uint64_t eh_frame_hdr_size;
size_t nr_debug_entries;
uint32_t code_load_count;
u64 bytes_written;
@@ -68,7 +72,10 @@ jit_emit_elf(char *filename,
const void *code,
int csize,
void *debug,
- int nr_debug_entries)
+ int nr_debug_entries,
+ void *unwinding,
+ uint32_t unwinding_header_size,
+ uint32_t unwinding_size)
{
int ret, fd;
@@ -81,7 +88,8 @@ jit_emit_elf(char *filename,
return -1;
}
- ret = jit_write_elf(fd, code_addr, sym, (const void *)code, csize, debug, nr_debug_entries);
+ ret = jit_write_elf(fd, code_addr, sym, (const void *)code, csize, debug, nr_debug_entries,
+ unwinding, unwinding_header_size, unwinding_size);
close(fd);
@@ -172,6 +180,12 @@ jit_open(struct jit_buf_desc *jd, const char *name)
header.elf_mach,
jd->use_arch_timestamp);
+ if (header.version > JITHEADER_VERSION) {
+ pr_err("wrong jitdump version %u, expected " STR(JITHEADER_VERSION),
+ header.version);
+ goto error;
+ }
+
if (header.flags & JITDUMP_FLAGS_RESERVED) {
pr_err("jitdump file contains invalid or unsupported flags 0x%llx\n",
(unsigned long long)header.flags & JITDUMP_FLAGS_RESERVED);
@@ -263,8 +277,7 @@ jit_get_next_entry(struct jit_buf_desc *jd)
return NULL;
if (id >= JIT_CODE_MAX) {
- pr_warning("next_entry: unknown prefix %d, skipping\n", id);
- return NULL;
+ pr_warning("next_entry: unknown record type %d, skipping\n", id);
}
if (bs > jd->bufsize) {
void *n;
@@ -296,6 +309,13 @@ jit_get_next_entry(struct jit_buf_desc *jd)
}
}
break;
+ case JIT_CODE_UNWINDING_INFO:
+ if (jd->needs_bswap) {
+ jr->unwinding.unwinding_size = bswap_64(jr->unwinding.unwinding_size);
+ jr->unwinding.eh_frame_hdr_size = bswap_64(jr->unwinding.eh_frame_hdr_size);
+ jr->unwinding.mapped_size = bswap_64(jr->unwinding.mapped_size);
+ }
+ break;
case JIT_CODE_CLOSE:
break;
case JIT_CODE_LOAD:
@@ -322,7 +342,8 @@ jit_get_next_entry(struct jit_buf_desc *jd)
break;
case JIT_CODE_MAX:
default:
- return NULL;
+ /* skip unknown record (we have read them) */
+ break;
}
return jr;
}
@@ -370,7 +391,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
u16 idr_size;
const char *sym;
uint32_t count;
- int ret, csize;
+ int ret, csize, usize;
pid_t pid, tid;
struct {
u32 pid, tid;
@@ -380,6 +401,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
pid = jr->load.pid;
tid = jr->load.tid;
csize = jr->load.code_size;
+ usize = jd->unwinding_mapped_size;
addr = jr->load.code_addr;
sym = (void *)((unsigned long)jr + sizeof(jr->load));
code = (unsigned long)jr + jr->load.p.total_size - csize;
@@ -400,7 +422,8 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
size = PERF_ALIGN(size, sizeof(u64));
uaddr = (uintptr_t)code;
- ret = jit_emit_elf(filename, sym, addr, (const void *)uaddr, csize, jd->debug_data, jd->nr_debug_entries);
+ ret = jit_emit_elf(filename, sym, addr, (const void *)uaddr, csize, jd->debug_data, jd->nr_debug_entries,
+ jd->unwinding_data, jd->eh_frame_hdr_size, jd->unwinding_size);
if (jd->debug_data && jd->nr_debug_entries) {
free(jd->debug_data);
@@ -408,6 +431,14 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
jd->nr_debug_entries = 0;
}
+ if (jd->unwinding_data && jd->eh_frame_hdr_size) {
+ free(jd->unwinding_data);
+ jd->unwinding_data = NULL;
+ jd->eh_frame_hdr_size = 0;
+ jd->unwinding_mapped_size = 0;
+ jd->unwinding_size = 0;
+ }
+
if (ret) {
free(event);
return -1;
@@ -422,7 +453,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
event->mmap2.start = addr;
- event->mmap2.len = csize;
+ event->mmap2.len = usize ? ALIGN_8(csize) + usize : csize;
event->mmap2.pid = pid;
event->mmap2.tid = tid;
event->mmap2.ino = st.st_ino;
@@ -473,6 +504,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
char *filename;
size_t size;
struct stat st;
+ int usize;
u16 idr_size;
int ret;
pid_t pid, tid;
@@ -483,6 +515,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
pid = jr->move.pid;
tid = jr->move.tid;
+ usize = jd->unwinding_mapped_size;
idr_size = jd->machine->id_hdr_size;
/*
@@ -511,7 +544,8 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
(sizeof(event->mmap2.filename) - size) + idr_size);
event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
event->mmap2.start = jr->move.new_code_addr;
- event->mmap2.len = jr->move.code_size;
+ event->mmap2.len = usize ? ALIGN_8(jr->move.code_size) + usize
+ : jr->move.code_size;
event->mmap2.pid = pid;
event->mmap2.tid = tid;
event->mmap2.ino = st.st_ino;
@@ -578,10 +612,35 @@ static int jit_repipe_debug_info(struct jit_buf_desc *jd, union jr_entry *jr)
}
static int
+jit_repipe_unwinding_info(struct jit_buf_desc *jd, union jr_entry *jr)
+{
+ void *unwinding_data;
+ uint32_t unwinding_data_size;
+
+ if (!(jd && jr))
+ return -1;
+
+ unwinding_data_size = jr->prefix.total_size - sizeof(jr->unwinding);
+ unwinding_data = malloc(unwinding_data_size);
+ if (!unwinding_data)
+ return -1;
+
+ memcpy(unwinding_data, &jr->unwinding.unwinding_data,
+ unwinding_data_size);
+
+ jd->eh_frame_hdr_size = jr->unwinding.eh_frame_hdr_size;
+ jd->unwinding_size = jr->unwinding.unwinding_size;
+ jd->unwinding_mapped_size = jr->unwinding.mapped_size;
+ jd->unwinding_data = unwinding_data;
+
+ return 0;
+}
+
+static int
jit_process_dump(struct jit_buf_desc *jd)
{
union jr_entry *jr;
- int ret;
+ int ret = 0;
while ((jr = jit_get_next_entry(jd))) {
switch(jr->prefix.id) {
@@ -594,6 +653,9 @@ jit_process_dump(struct jit_buf_desc *jd)
case JIT_CODE_DEBUG_INFO:
ret = jit_repipe_debug_info(jd, jr);
break;
+ case JIT_CODE_UNWINDING_INFO:
+ ret = jit_repipe_unwinding_info(jd, jr);
+ break;
default:
ret = 0;
continue;
diff --git a/tools/perf/util/jitdump.h b/tools/perf/util/jitdump.h
index bcacd20d0c1c..c6b9b67f43bf 100644
--- a/tools/perf/util/jitdump.h
+++ b/tools/perf/util/jitdump.h
@@ -19,6 +19,7 @@
#define JITHEADER_MAGIC_SW 0x4454694A
#define PADDING_8ALIGNED(x) ((((x) + 7) & 7) ^ 7)
+#define ALIGN_8(x) (((x) + 7) & (~7))
#define JITHEADER_VERSION 1
@@ -48,6 +49,7 @@ enum jit_record_type {
JIT_CODE_MOVE = 1,
JIT_CODE_DEBUG_INFO = 2,
JIT_CODE_CLOSE = 3,
+ JIT_CODE_UNWINDING_INFO = 4,
JIT_CODE_MAX,
};
@@ -101,12 +103,22 @@ struct jr_code_debug_info {
struct debug_entry entries[0];
};
+struct jr_code_unwinding_info {
+ struct jr_prefix p;
+
+ uint64_t unwinding_size;
+ uint64_t eh_frame_hdr_size;
+ uint64_t mapped_size;
+ const char unwinding_data[0];
+};
+
union jr_entry {
struct jr_code_debug_info info;
struct jr_code_close close;
struct jr_code_load load;
struct jr_code_move move;
struct jr_prefix prefix;
+ struct jr_code_unwinding_info unwinding;
};
static inline struct debug_entry *
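Taken together, the jitdump changes add a fourth record type so a JIT can hand perf the eh_frame unwinding tables for its generated code; perf then maps ALIGN_8(code_size) + mapped_size bytes so the unwinder finds the tables right after the code (e.g. csize = 100 with a 4096-byte unwinding region gives an mmap2 length of 104 + 4096 = 4200). A producer-side sketch, assuming the struct jr_code_unwinding_info layout above; emit_record() is a placeholder for whatever buffered write path the JIT uses:

#include <stdint.h>
#include <string.h>

static int emit_unwinding_record(int fd, const void *unw_data,
                                 uint64_t unw_size, uint64_t hdr_size)
{
        struct jr_code_unwinding_info rec;

        memset(&rec, 0, sizeof(rec));           /* timestamp etc. left zero here */
        rec.p.id = JIT_CODE_UNWINDING_INFO;
        rec.p.total_size = sizeof(rec) + unw_size;
        rec.unwinding_size = unw_size;          /* eh_frame (+ eh_frame_hdr) bytes */
        rec.eh_frame_hdr_size = hdr_size;
        rec.mapped_size = ALIGN_8(unw_size);    /* size mapped after the code */

        if (emit_record(fd, &rec, sizeof(rec)) ||
            emit_record(fd, unw_data, unw_size))
                return -1;
        return 0;
}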
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index bf7216b8731d..b23ff44cf214 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -7,6 +7,7 @@
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
+#include <linux/err.h>
#include "debug.h"
#include "llvm-utils.h"
#include "config.h"
@@ -282,9 +283,10 @@ static const char *kinc_fetch_script =
"rm -rf $TMPDIR\n"
"exit $RET\n";
-static inline void
-get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts)
+void llvm__get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts)
{
+ static char *saved_kbuild_dir;
+ static char *saved_kbuild_include_opts;
int err;
if (!kbuild_dir || !kbuild_include_opts)
@@ -293,10 +295,28 @@ get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts)
*kbuild_dir = NULL;
*kbuild_include_opts = NULL;
+ if (saved_kbuild_dir && saved_kbuild_include_opts &&
+ !IS_ERR(saved_kbuild_dir) && !IS_ERR(saved_kbuild_include_opts)) {
+ *kbuild_dir = strdup(saved_kbuild_dir);
+ *kbuild_include_opts = strdup(saved_kbuild_include_opts);
+
+ if (*kbuild_dir && *kbuild_include_opts)
+ return;
+
+ zfree(kbuild_dir);
+ zfree(kbuild_include_opts);
+ /*
+	 * Don't fall through: it may break saved_kbuild_dir and
+	 * saved_kbuild_include_opts if we detect them again when
+ * memory is low.
+ */
+ return;
+ }
+
if (llvm_param.kbuild_dir && !llvm_param.kbuild_dir[0]) {
pr_debug("[llvm.kbuild-dir] is set to \"\" deliberately.\n");
pr_debug("Skip kbuild options detection.\n");
- return;
+ goto errout;
}
err = detect_kbuild_dir(kbuild_dir);
@@ -306,7 +326,7 @@ get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts)
"Hint:\tSet correct kbuild directory using 'kbuild-dir' option in [llvm]\n"
" \tsection of ~/.perfconfig or set it to \"\" to suppress kbuild\n"
" \tdetection.\n\n");
- return;
+ goto errout;
}
pr_debug("Kernel build dir is set to %s\n", *kbuild_dir);
@@ -325,21 +345,50 @@ get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts)
free(*kbuild_dir);
*kbuild_dir = NULL;
- return;
+ goto errout;
}
pr_debug("include option is set to %s\n", *kbuild_include_opts);
+
+ saved_kbuild_dir = strdup(*kbuild_dir);
+ saved_kbuild_include_opts = strdup(*kbuild_include_opts);
+
+ if (!saved_kbuild_dir || !saved_kbuild_include_opts) {
+ zfree(&saved_kbuild_dir);
+ zfree(&saved_kbuild_include_opts);
+ }
+ return;
+errout:
+ saved_kbuild_dir = ERR_PTR(-EINVAL);
+ saved_kbuild_include_opts = ERR_PTR(-EINVAL);
}
-static void
-dump_obj(const char *path, void *obj_buf, size_t size)
+int llvm__get_nr_cpus(void)
+{
+ static int nr_cpus_avail = 0;
+ char serr[STRERR_BUFSIZE];
+
+ if (nr_cpus_avail > 0)
+ return nr_cpus_avail;
+
+ nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF);
+ if (nr_cpus_avail <= 0) {
+ pr_err(
+"WARNING:\tunable to get available CPUs in this system: %s\n"
+" \tUse 128 instead.\n", str_error_r(errno, serr, sizeof(serr)));
+ nr_cpus_avail = 128;
+ }
+ return nr_cpus_avail;
+}
+
+void llvm__dump_obj(const char *path, void *obj_buf, size_t size)
{
char *obj_path = strdup(path);
FILE *fp;
char *p;
if (!obj_path) {
- pr_warning("WARNING: No enough memory, skip object dumping\n");
+ pr_warning("WARNING: Not enough memory, skip object dumping\n");
return;
}
@@ -406,15 +455,9 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
* This is optional work. Even if it fails we can continue our
* work. There is no need to check the error return.
*/
- get_kbuild_opts(&kbuild_dir, &kbuild_include_opts);
+ llvm__get_kbuild_opts(&kbuild_dir, &kbuild_include_opts);
- nr_cpus_avail = sysconf(_SC_NPROCESSORS_CONF);
- if (nr_cpus_avail <= 0) {
- pr_err(
-"WARNING:\tunable to get available CPUs in this system: %s\n"
-" \tUse 128 instead.\n", str_error_r(errno, serr, sizeof(serr)));
- nr_cpus_avail = 128;
- }
+ nr_cpus_avail = llvm__get_nr_cpus();
snprintf(nr_cpus_avail_str, sizeof(nr_cpus_avail_str), "%d",
nr_cpus_avail);
@@ -453,9 +496,6 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
free(kbuild_dir);
free(kbuild_include_opts);
- if (llvm_param.dump_obj)
- dump_obj(path, obj_buf, obj_buf_sz);
-
if (!p_obj_buf)
free(obj_buf);
else
diff --git a/tools/perf/util/llvm-utils.h b/tools/perf/util/llvm-utils.h
index 9f501cef06a1..c87a2a92a88f 100644
--- a/tools/perf/util/llvm-utils.h
+++ b/tools/perf/util/llvm-utils.h
@@ -50,4 +50,10 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf, size_t *p_obj_buf_sz);
/* This function is for test__llvm() use only */
int llvm__search_clang(void);
+
+/* Following functions are reused by builtin clang support */
+void llvm__get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts);
+int llvm__get_nr_cpus(void);
+
+void llvm__dump_obj(const char *path, void *obj_buf, size_t size);
#endif
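The llvm-utils rework exports the kbuild detection and caches its result in static variables, so repeated compiles (and the builtin clang support the header now mentions) do not rerun the detection shell scripts; llvm__get_nr_cpus() caches the CPU count the same way. A minimal caller sketch against the prototypes above:

#include <stdlib.h>
#include "util/llvm-utils.h"

/* Sketch: gather the cached kbuild options and CPU count for a clang/BPF compile. */
static void setup_compile_env(void)
{
        char *kbuild_dir = NULL, *kbuild_include_opts = NULL;
        int ncpus;

        /* First call runs detection; later calls hand back strdup()ed cached copies. */
        llvm__get_kbuild_opts(&kbuild_dir, &kbuild_include_opts);
        ncpus = llvm__get_nr_cpus();    /* falls back to 128 if sysconf() fails */

        /* ... feed kbuild_include_opts and ncpus into the compiler command line ... */
        (void)ncpus;

        free(kbuild_dir);
        free(kbuild_include_opts);
}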
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index df85b9efd80f..9b33bef54581 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1616,7 +1616,11 @@ static int add_callchain_ip(struct thread *thread,
struct symbol **parent,
struct addr_location *root_al,
u8 *cpumode,
- u64 ip)
+ u64 ip,
+ bool branch,
+ struct branch_flags *flags,
+ int nr_loop_iter,
+ int samples)
{
struct addr_location al;
@@ -1668,7 +1672,8 @@ static int add_callchain_ip(struct thread *thread,
if (symbol_conf.hide_unresolved && al.sym == NULL)
return 0;
- return callchain_cursor_append(cursor, al.addr, al.map, al.sym);
+ return callchain_cursor_append(cursor, al.addr, al.map, al.sym,
+ branch, flags, nr_loop_iter, samples);
}
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
@@ -1757,7 +1762,9 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
/* LBR only affects the user callchain */
if (i != chain_nr) {
struct branch_stack *lbr_stack = sample->branch_stack;
- int lbr_nr = lbr_stack->nr, j;
+ int lbr_nr = lbr_stack->nr, j, k;
+ bool branch;
+ struct branch_flags *flags;
/*
* LBR callstack can only get user call chain.
* The mix_chain_nr is kernel call chain
@@ -1772,23 +1779,41 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
for (j = 0; j < mix_chain_nr; j++) {
int err;
+ branch = false;
+ flags = NULL;
+
if (callchain_param.order == ORDER_CALLEE) {
if (j < i + 1)
ip = chain->ips[j];
- else if (j > i + 1)
- ip = lbr_stack->entries[j - i - 2].from;
- else
+ else if (j > i + 1) {
+ k = j - i - 2;
+ ip = lbr_stack->entries[k].from;
+ branch = true;
+ flags = &lbr_stack->entries[k].flags;
+ } else {
ip = lbr_stack->entries[0].to;
+ branch = true;
+ flags = &lbr_stack->entries[0].flags;
+ }
} else {
- if (j < lbr_nr)
- ip = lbr_stack->entries[lbr_nr - j - 1].from;
+ if (j < lbr_nr) {
+ k = lbr_nr - j - 1;
+ ip = lbr_stack->entries[k].from;
+ branch = true;
+ flags = &lbr_stack->entries[k].flags;
+ }
else if (j > lbr_nr)
ip = chain->ips[i + 1 - (j - lbr_nr)];
- else
+ else {
ip = lbr_stack->entries[0].to;
+ branch = true;
+ flags = &lbr_stack->entries[0].flags;
+ }
}
- err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ branch, flags, 0, 0);
if (err)
return (err < 0) ? err : 0;
}
@@ -1813,6 +1838,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
int i, j, err, nr_entries;
int skip_idx = -1;
int first_call = 0;
+ int nr_loop_iter;
if (perf_evsel__has_branch_callstack(evsel)) {
err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
@@ -1868,14 +1894,37 @@ static int thread__resolve_callchain_sample(struct thread *thread,
be[i] = branch->entries[branch->nr - i - 1];
}
+ nr_loop_iter = nr;
nr = remove_loops(be, nr);
+ /*
+ * Get the number of iterations.
+	 * It's only an approximation, but good enough in practice.
+ */
+ if (nr_loop_iter > nr)
+ nr_loop_iter = nr_loop_iter - nr + 1;
+ else
+ nr_loop_iter = 0;
+
for (i = 0; i < nr; i++) {
- err = add_callchain_ip(thread, cursor, parent, root_al,
- NULL, be[i].to);
+ if (i == nr - 1)
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al,
+ NULL, be[i].to,
+ true, &be[i].flags,
+ nr_loop_iter, 1);
+ else
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al,
+ NULL, be[i].to,
+ true, &be[i].flags,
+ 0, 0);
+
if (!err)
err = add_callchain_ip(thread, cursor, parent, root_al,
- NULL, be[i].from);
+ NULL, be[i].from,
+ true, &be[i].flags,
+ 0, 0);
if (err == -EINVAL)
break;
if (err)
@@ -1903,7 +1952,9 @@ check_calls:
if (ip < PERF_CONTEXT_MAX)
++nr_entries;
- err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ false, NULL, 0, 0);
if (err)
return (err < 0) ? err : 0;
@@ -1919,7 +1970,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
if (symbol_conf.hide_unresolved && entry->sym == NULL)
return 0;
return callchain_cursor_append(cursor, entry->ip,
- entry->map, entry->sym);
+ entry->map, entry->sym,
+ false, NULL, 0, 0);
}
static int thread__resolve_callchain_unwind(struct thread *thread,
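The machine.c changes thread branch information (the branch flags, whether an entry came from a branch stack, and a loop-iteration estimate) through add_callchain_ip() into callchain_cursor_append(). The iteration estimate is deliberately rough: if remove_loops() shrinks the branch stack from its original count to a smaller nr, the last appended entry is credited with original - nr + 1 iterations, so 16 LBR entries collapsing to 4 are reported as 16 - 4 + 1 = 13 iterations.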
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index c662fef95d14..4f9a71c63026 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -682,9 +682,16 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
continue;
if (verbose >= 2) {
- fputs("overlapping maps:\n", fp);
- map__fprintf(map, fp);
- map__fprintf(pos, fp);
+
+ if (use_browser) {
+ pr_warning("overlapping maps in %s "
+ "(disable tui for more info)\n",
+ map->dso->name);
+ } else {
+ fputs("overlapping maps:\n", fp);
+ map__fprintf(map, fp);
+ map__fprintf(pos, fp);
+ }
}
rb_erase_init(&pos->rb_node, root);
@@ -702,7 +709,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
before->end = map->start;
__map_groups__insert(pos->groups, before);
- if (verbose >= 2)
+ if (verbose >= 2 && !use_browser)
map__fprintf(before, fp);
map__put(before);
}
@@ -717,7 +724,7 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
after->start = map->end;
__map_groups__insert(pos->groups, after);
- if (verbose >= 2)
+ if (verbose >= 2 && !use_browser)
map__fprintf(after, fp);
map__put(after);
}
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index bbc368e7d1e4..1d4ab53c60ca 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -9,6 +9,7 @@
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
+#include "sort.h"
unsigned int perf_mem_events__loads_ldlat = 30;
@@ -268,3 +269,138 @@ int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_in
return i;
}
+
+int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
+{
+ union perf_mem_data_src *data_src = &mi->data_src;
+ u64 daddr = mi->daddr.addr;
+ u64 op = data_src->mem_op;
+ u64 lvl = data_src->mem_lvl;
+ u64 snoop = data_src->mem_snoop;
+ u64 lock = data_src->mem_lock;
+ int err = 0;
+
+#define HITM_INC(__f) \
+do { \
+ stats->__f++; \
+ stats->tot_hitm++; \
+} while (0)
+
+#define P(a, b) PERF_MEM_##a##_##b
+
+ stats->nr_entries++;
+
+ if (lock & P(LOCK, LOCKED)) stats->locks++;
+
+ if (op & P(OP, LOAD)) {
+ /* load */
+ stats->load++;
+
+ if (!daddr) {
+ stats->ld_noadrs++;
+ return -1;
+ }
+
+ if (lvl & P(LVL, HIT)) {
+ if (lvl & P(LVL, UNC)) stats->ld_uncache++;
+ if (lvl & P(LVL, IO)) stats->ld_io++;
+ if (lvl & P(LVL, LFB)) stats->ld_fbhit++;
+ if (lvl & P(LVL, L1 )) stats->ld_l1hit++;
+ if (lvl & P(LVL, L2 )) stats->ld_l2hit++;
+ if (lvl & P(LVL, L3 )) {
+ if (snoop & P(SNOOP, HITM))
+ HITM_INC(lcl_hitm);
+ else
+ stats->ld_llchit++;
+ }
+
+ if (lvl & P(LVL, LOC_RAM)) {
+ stats->lcl_dram++;
+ if (snoop & P(SNOOP, HIT))
+ stats->ld_shared++;
+ else
+ stats->ld_excl++;
+ }
+
+ if ((lvl & P(LVL, REM_RAM1)) ||
+ (lvl & P(LVL, REM_RAM2))) {
+ stats->rmt_dram++;
+ if (snoop & P(SNOOP, HIT))
+ stats->ld_shared++;
+ else
+ stats->ld_excl++;
+ }
+ }
+
+ if ((lvl & P(LVL, REM_CCE1)) ||
+ (lvl & P(LVL, REM_CCE2))) {
+ if (snoop & P(SNOOP, HIT))
+ stats->rmt_hit++;
+ else if (snoop & P(SNOOP, HITM))
+ HITM_INC(rmt_hitm);
+ }
+
+ if ((lvl & P(LVL, MISS)))
+ stats->ld_miss++;
+
+ } else if (op & P(OP, STORE)) {
+ /* store */
+ stats->store++;
+
+ if (!daddr) {
+ stats->st_noadrs++;
+ return -1;
+ }
+
+ if (lvl & P(LVL, HIT)) {
+ if (lvl & P(LVL, UNC)) stats->st_uncache++;
+ if (lvl & P(LVL, L1 )) stats->st_l1hit++;
+ }
+ if (lvl & P(LVL, MISS))
+ if (lvl & P(LVL, L1)) stats->st_l1miss++;
+ } else {
+ /* unparsable data_src? */
+ stats->noparse++;
+ return -1;
+ }
+
+ if (!mi->daddr.map || !mi->iaddr.map) {
+ stats->nomap++;
+ return -1;
+ }
+
+#undef P
+#undef HITM_INC
+ return err;
+}
+
+void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add)
+{
+ stats->nr_entries += add->nr_entries;
+
+ stats->locks += add->locks;
+ stats->store += add->store;
+ stats->st_uncache += add->st_uncache;
+ stats->st_noadrs += add->st_noadrs;
+ stats->st_l1hit += add->st_l1hit;
+ stats->st_l1miss += add->st_l1miss;
+ stats->load += add->load;
+ stats->ld_excl += add->ld_excl;
+ stats->ld_shared += add->ld_shared;
+ stats->ld_uncache += add->ld_uncache;
+ stats->ld_io += add->ld_io;
+ stats->ld_miss += add->ld_miss;
+ stats->ld_noadrs += add->ld_noadrs;
+ stats->ld_fbhit += add->ld_fbhit;
+ stats->ld_l1hit += add->ld_l1hit;
+ stats->ld_l2hit += add->ld_l2hit;
+ stats->ld_llchit += add->ld_llchit;
+ stats->lcl_hitm += add->lcl_hitm;
+ stats->rmt_hitm += add->rmt_hitm;
+ stats->tot_hitm += add->tot_hitm;
+ stats->rmt_hit += add->rmt_hit;
+ stats->lcl_dram += add->lcl_dram;
+ stats->rmt_dram += add->rmt_dram;
+ stats->nomap += add->nomap;
+ stats->noparse += add->noparse;
+}
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index 7f69bf9d789d..40f72ee4f42a 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -2,6 +2,10 @@
#define __PERF_MEM_EVENTS_H
#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <linux/types.h>
+#include "stat.h"
struct perf_mem_event {
bool record;
@@ -33,4 +37,38 @@ int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_script__meminfo_scnprintf(char *bf, size_t size, struct mem_info *mem_info);
+struct c2c_stats {
+ u32 nr_entries;
+
+ u32 locks; /* count of 'lock' transactions */
+ u32 store; /* count of all stores in trace */
+ u32 st_uncache; /* stores to uncacheable address */
+ u32 st_noadrs; /* cacheable store with no address */
+ u32 st_l1hit; /* count of stores that hit L1D */
+ u32 st_l1miss; /* count of stores that miss L1D */
+ u32 load; /* count of all loads in trace */
+ u32 ld_excl; /* exclusive loads, rmt/lcl DRAM - snp none/miss */
+ u32 ld_shared; /* shared loads, rmt/lcl DRAM - snp hit */
+ u32 ld_uncache; /* loads to uncacheable address */
+ u32 ld_io; /* loads to io address */
+ u32 ld_miss; /* loads miss */
+ u32 ld_noadrs; /* cacheable load with no address */
+ u32 ld_fbhit; /* count of loads hitting Fill Buffer */
+ u32 ld_l1hit; /* count of loads that hit L1D */
+ u32 ld_l2hit; /* count of loads that hit L2D */
+ u32 ld_llchit; /* count of loads that hit LLC */
+ u32 lcl_hitm; /* count of loads with local HITM */
+ u32 rmt_hitm; /* count of loads with remote HITM */
+ u32 tot_hitm; /* count of loads with local and remote HITM */
+	u32	rmt_hit;             /* count of loads with remote hit clean */
+ u32 lcl_dram; /* count of loads miss to local DRAM */
+ u32 rmt_dram; /* count of loads miss to remote DRAM */
+ u32 nomap; /* count of load/stores with no phys adrs */
+ u32 noparse; /* count of unparsable data sources */
+};
+
+struct hist_entry;
+int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi);
+void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add);
+
#endif /* __PERF_MEM_EVENTS_H */
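c2c_decode_stats() classifies one decoded perf_mem_data_src into the counters above, and returns a negative value for samples that cannot be fully attributed (missing data address, missing map, or an unparsable source); c2c_add_stats() folds one set of counters into another. A minimal aggregation sketch over already-resolved struct mem_info samples:

#include "mem-events.h"

/* Sketch: fold one resolved memory sample into a running c2c summary. */
static void account_sample(struct c2c_stats *total, struct mem_info *mi)
{
        struct c2c_stats one = { 0 };           /* zeroed per-sample counters */
        int err = c2c_decode_stats(&one, mi);   /* < 0: not fully attributable */

        /* noparse/nomap/ld_noadrs counters still carry information on failure */
        c2c_add_stats(total, &one);
        (void)err;
}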
diff --git a/tools/perf/util/parse-branch-options.c b/tools/perf/util/parse-branch-options.c
index afc088dd7d20..38fd11504015 100644
--- a/tools/perf/util/parse-branch-options.c
+++ b/tools/perf/util/parse-branch-options.c
@@ -31,59 +31,51 @@ static const struct branch_mode branch_modes[] = {
BRANCH_END
};
-int
-parse_branch_stack(const struct option *opt, const char *str, int unset)
+int parse_branch_str(const char *str, __u64 *mode)
{
#define ONLY_PLM \
(PERF_SAMPLE_BRANCH_USER |\
PERF_SAMPLE_BRANCH_KERNEL |\
PERF_SAMPLE_BRANCH_HV)
- uint64_t *mode = (uint64_t *)opt->value;
+ int ret = 0;
+ char *p, *s;
+ char *os = NULL;
const struct branch_mode *br;
- char *s, *os = NULL, *p;
- int ret = -1;
- if (unset)
+ if (str == NULL) {
+ *mode = PERF_SAMPLE_BRANCH_ANY;
return 0;
+ }
- /*
- * cannot set it twice, -b + --branch-filter for instance
- */
- if (*mode)
+ /* because str is read-only */
+ s = os = strdup(str);
+ if (!s)
return -1;
- /* str may be NULL in case no arg is passed to -b */
- if (str) {
- /* because str is read-only */
- s = os = strdup(str);
- if (!s)
- return -1;
-
- for (;;) {
- p = strchr(s, ',');
- if (p)
- *p = '\0';
-
- for (br = branch_modes; br->name; br++) {
- if (!strcasecmp(s, br->name))
- break;
- }
- if (!br->name) {
- ui__warning("unknown branch filter %s,"
- " check man page\n", s);
- goto error;
- }
-
- *mode |= br->mode;
-
- if (!p)
- break;
+ for (;;) {
+ p = strchr(s, ',');
+ if (p)
+ *p = '\0';
- s = p + 1;
+ for (br = branch_modes; br->name; br++) {
+ if (!strcasecmp(s, br->name))
+ break;
+ }
+ if (!br->name) {
+ ret = -1;
+ pr_warning("unknown branch filter %s,"
+ " check man page\n", s);
+ goto error;
}
+
+ *mode |= br->mode;
+
+ if (!p)
+ break;
+
+ s = p + 1;
}
- ret = 0;
/* default to any branch */
if ((*mode & ~ONLY_PLM) == 0) {
@@ -93,3 +85,20 @@ error:
free(os);
return ret;
}
+
+int
+parse_branch_stack(const struct option *opt, const char *str, int unset)
+{
+ __u64 *mode = (__u64 *)opt->value;
+
+ if (unset)
+ return 0;
+
+ /*
+ * cannot set it twice, -b + --branch-filter for instance
+ */
+ if (*mode)
+ return -1;
+
+ return parse_branch_str(str, mode);
+}
diff --git a/tools/perf/util/parse-branch-options.h b/tools/perf/util/parse-branch-options.h
index b9d9470c2e82..6086fd90eb23 100644
--- a/tools/perf/util/parse-branch-options.h
+++ b/tools/perf/util/parse-branch-options.h
@@ -1,5 +1,6 @@
#ifndef _PERF_PARSE_BRANCH_OPTIONS_H
#define _PERF_PARSE_BRANCH_OPTIONS_H 1
-struct option;
+#include <stdint.h>
int parse_branch_stack(const struct option *opt, const char *str, int unset);
+int parse_branch_str(const char *str, __u64 *mode);
#endif /* _PERF_PARSE_BRANCH_OPTIONS_H */
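Splitting parse_branch_str() out of the option callback lets other users (the per-event branch_type term added below, and the python extension sources) turn a branch-filter string into a branch_sample_type mask directly. A minimal sketch:

#include <linux/perf_event.h>
#include "util/parse-branch-options.h"

/* Sketch: turn a user filter string such as "any_call,u" into a sample mask. */
static int get_branch_mask(const char *filter, __u64 *mask)
{
        *mask = 0;                              /* parse_branch_str() ORs into the mask */
        return parse_branch_str(filter, mask);  /* -1 on an unknown filter name */
}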
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4e778eae1510..3c876b8ba4de 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -22,6 +22,7 @@
#include "cpumap.h"
#include "probe-file.h"
#include "asm/bug.h"
+#include "util/parse-branch-options.h"
#define MAX_NAME_LEN 100
@@ -973,10 +974,13 @@ do { \
CHECK_TYPE_VAL(NUM);
break;
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
- /*
- * TODO uncomment when the field is available
- * attr->branch_sample_type = term->val.num;
- */
+ CHECK_TYPE_VAL(STR);
+ if (strcmp(term->val.str, "no") &&
+ parse_branch_str(term->val.str, &attr->branch_sample_type)) {
+ err->str = strdup("invalid branch sample type");
+ err->idx = term->err_val;
+ return -EINVAL;
+ }
break;
case PARSE_EVENTS__TERM_TYPE_TIME:
CHECK_TYPE_VAL(NUM);
@@ -1119,6 +1123,9 @@ do { \
case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
ADD_CONFIG_TERM(CALLGRAPH, callgraph, term->val.str);
break;
+ case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
+ ADD_CONFIG_TERM(BRANCH, branch, term->val.str);
+ break;
case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num);
break;
diff --git a/tools/perf/util/perf-hooks-list.h b/tools/perf/util/perf-hooks-list.h
new file mode 100644
index 000000000000..2867c07ee84e
--- /dev/null
+++ b/tools/perf/util/perf-hooks-list.h
@@ -0,0 +1,3 @@
+PERF_HOOK(record_start)
+PERF_HOOK(record_end)
+PERF_HOOK(test)
diff --git a/tools/perf/util/perf-hooks.c b/tools/perf/util/perf-hooks.c
new file mode 100644
index 000000000000..cb368306b12b
--- /dev/null
+++ b/tools/perf/util/perf-hooks.c
@@ -0,0 +1,88 @@
+/*
+ * perf-hooks.c
+ *
+ * Copyright (C) 2016 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2016 Huawei Inc.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <setjmp.h>
+#include <linux/err.h>
+#include "util/util.h"
+#include "util/debug.h"
+#include "util/perf-hooks.h"
+
+static sigjmp_buf jmpbuf;
+static const struct perf_hook_desc *current_perf_hook;
+
+void perf_hooks__invoke(const struct perf_hook_desc *desc)
+{
+ if (!(desc && desc->p_hook_func && *desc->p_hook_func))
+ return;
+
+ if (sigsetjmp(jmpbuf, 1)) {
+ pr_warning("Fatal error (SEGFAULT) in perf hook '%s'\n",
+ desc->hook_name);
+ *(current_perf_hook->p_hook_func) = NULL;
+ } else {
+ current_perf_hook = desc;
+ (**desc->p_hook_func)(desc->hook_ctx);
+ }
+ current_perf_hook = NULL;
+}
+
+void perf_hooks__recover(void)
+{
+ if (current_perf_hook)
+ siglongjmp(jmpbuf, 1);
+}
+
+#define PERF_HOOK(name) \
+perf_hook_func_t __perf_hook_func_##name = NULL; \
+struct perf_hook_desc __perf_hook_desc_##name = \
+ {.hook_name = #name, \
+ .p_hook_func = &__perf_hook_func_##name, \
+ .hook_ctx = NULL};
+#include "perf-hooks-list.h"
+#undef PERF_HOOK
+
+#define PERF_HOOK(name) \
+ &__perf_hook_desc_##name,
+
+static struct perf_hook_desc *perf_hooks[] = {
+#include "perf-hooks-list.h"
+};
+#undef PERF_HOOK
+
+int perf_hooks__set_hook(const char *hook_name,
+ perf_hook_func_t hook_func,
+ void *hook_ctx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(perf_hooks); i++) {
+ if (strcmp(hook_name, perf_hooks[i]->hook_name) != 0)
+ continue;
+
+ if (*(perf_hooks[i]->p_hook_func))
+ pr_warning("Overwrite existing hook: %s\n", hook_name);
+ *(perf_hooks[i]->p_hook_func) = hook_func;
+ perf_hooks[i]->hook_ctx = hook_ctx;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+perf_hook_func_t perf_hooks__get_hook(const char *hook_name)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(perf_hooks); i++) {
+ if (strcmp(hook_name, perf_hooks[i]->hook_name) != 0)
+ continue;
+
+ return *(perf_hooks[i]->p_hook_func);
+ }
+ return ERR_PTR(-ENOENT);
+}
diff --git a/tools/perf/util/perf-hooks.h b/tools/perf/util/perf-hooks.h
new file mode 100644
index 000000000000..838d5797bc1e
--- /dev/null
+++ b/tools/perf/util/perf-hooks.h
@@ -0,0 +1,39 @@
+#ifndef PERF_UTIL_PERF_HOOKS_H
+#define PERF_UTIL_PERF_HOOKS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef void (*perf_hook_func_t)(void *ctx);
+struct perf_hook_desc {
+ const char * const hook_name;
+ perf_hook_func_t * const p_hook_func;
+ void *hook_ctx;
+};
+
+extern void perf_hooks__invoke(const struct perf_hook_desc *);
+extern void perf_hooks__recover(void);
+
+#define PERF_HOOK(name) \
+extern struct perf_hook_desc __perf_hook_desc_##name; \
+static inline void perf_hooks__invoke_##name(void) \
+{ \
+ perf_hooks__invoke(&__perf_hook_desc_##name); \
+}
+
+#include "perf-hooks-list.h"
+#undef PERF_HOOK
+
+extern int
+perf_hooks__set_hook(const char *hook_name,
+ perf_hook_func_t hook_func,
+ void *hook_ctx);
+
+extern perf_hook_func_t
+perf_hooks__get_hook(const char *hook_name);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
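perf-hooks gives built-in commands named callback points: perf-hooks-list.h expands into one descriptor plus one perf_hooks__invoke_<name>() wrapper per hook, and a hook that crashes can be unwound via perf_hooks__recover() and disarmed instead of taking the tool down. A registration sketch against the API above:

#include <stdio.h>
#include "util/perf-hooks.h"

static void on_record_start(void *ctx)
{
        fprintf(stderr, "record starting (ctx=%p)\n", ctx);
}

/* Sketch: register a callback, then fire it the way builtin-record would. */
static void hook_demo(void)
{
        if (perf_hooks__set_hook("record_start", on_record_start, NULL))
                return;                         /* unknown hook name: -ENOENT */

        perf_hooks__invoke_record_start();
}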
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index b1474dcadfa2..dc6ccaa4e927 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -504,6 +504,7 @@ static void pmu_add_cpu_aliases(struct list_head *head)
struct pmu_events_map *map;
struct pmu_event *pe;
char *cpuid;
+ static bool printed;
cpuid = getenv("PERF_CPUID");
if (cpuid)
@@ -513,7 +514,10 @@ static void pmu_add_cpu_aliases(struct list_head *head)
if (!cpuid)
return;
- pr_debug("Using CPUID %s\n", cpuid);
+ if (!printed) {
+ pr_debug("Using CPUID %s\n", cpuid);
+ printed = true;
+ }
i = 0;
while (1) {
@@ -1135,9 +1139,11 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag,
bool is_cpu = !strcmp(pmu->name, "cpu");
if (event_glob != NULL &&
- !(strglobmatch(name, event_glob) ||
- (!is_cpu && strglobmatch(alias->name,
- event_glob))))
+ !(strglobmatch_nocase(name, event_glob) ||
+ (!is_cpu && strglobmatch_nocase(alias->name,
+ event_glob)) ||
+ (alias->topic &&
+ strglobmatch_nocase(alias->topic, event_glob))))
continue;
if (is_cpu && !name_only && !alias->desc)
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 8091d15113f7..5d4e94061402 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -18,6 +18,8 @@ struct probe_conf {
extern struct probe_conf probe_conf;
extern bool probe_event_dry_run;
+struct symbol;
+
/* kprobe-tracer and uprobe-tracer tracing point */
struct probe_trace_point {
char *realname; /* function real name (if needed) */
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index b7d4f4aeee61..0546a4304347 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -18,6 +18,7 @@ util/thread_map.c
util/util.c
util/xyarray.c
util/cgroup.c
+util/parse-branch-options.c
util/rblist.c
util/counts.c
util/strlist.c
diff --git a/tools/perf/util/quote.c b/tools/perf/util/quote.c
index 639d1da2f978..293534c1a474 100644
--- a/tools/perf/util/quote.c
+++ b/tools/perf/util/quote.c
@@ -54,7 +54,7 @@ int sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
break;
ret = sq_quote_buf(dst, argv[i]);
if (maxlen && dst->len > maxlen)
- die("Too many or long arguments");
+ return -ENOSPC;
}
return ret;
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 5d61242a6e64..f268201048a0 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2025,20 +2025,10 @@ out_delete_map:
void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
bool full)
{
- struct stat st;
- int fd, ret;
-
if (session == NULL || fp == NULL)
return;
- fd = perf_data_file__fd(session->file);
-
- ret = fstat(fd, &st);
- if (ret == -1)
- return;
-
fprintf(fp, "# ========\n");
- fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
perf_header__fprintf_info(session, fp, full);
fprintf(fp, "# ========\n#\n");
}
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 452e15a10dd2..df622f4e301e 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -315,7 +315,7 @@ struct sort_entry sort_sym = {
/* --sort srcline */
-static char *hist_entry__get_srcline(struct hist_entry *he)
+char *hist_entry__get_srcline(struct hist_entry *he)
{
struct map *map = he->ms.map;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 099c97557d33..7aff317fc7c4 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -280,4 +280,5 @@ int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right);
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right);
+char *hist_entry__get_srcline(struct hist_entry *he);
#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 7f7e072be746..d8dfaf64b32e 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -193,7 +193,8 @@ error:
}
/* Glob/lazy pattern matching */
-static bool __match_glob(const char *str, const char *pat, bool ignore_space)
+static bool __match_glob(const char *str, const char *pat, bool ignore_space,
+ bool case_ins)
{
while (*str && *pat && *pat != '*') {
if (ignore_space) {
@@ -219,8 +220,13 @@ static bool __match_glob(const char *str, const char *pat, bool ignore_space)
return false;
else if (*pat == '\\') /* Escaped char match as normal char */
pat++;
- if (*str++ != *pat++)
+ if (case_ins) {
+ if (tolower(*str) != tolower(*pat))
+ return false;
+ } else if (*str != *pat)
return false;
+ str++;
+ pat++;
}
/* Check wild card */
if (*pat == '*') {
@@ -229,7 +235,7 @@ static bool __match_glob(const char *str, const char *pat, bool ignore_space)
if (!*pat) /* Tail wild card matches all */
return true;
while (*str)
- if (__match_glob(str++, pat, ignore_space))
+ if (__match_glob(str++, pat, ignore_space, case_ins))
return true;
}
return !*str && !*pat;
@@ -249,7 +255,12 @@ static bool __match_glob(const char *str, const char *pat, bool ignore_space)
*/
bool strglobmatch(const char *str, const char *pat)
{
- return __match_glob(str, pat, false);
+ return __match_glob(str, pat, false, false);
+}
+
+bool strglobmatch_nocase(const char *str, const char *pat)
+{
+ return __match_glob(str, pat, false, true);
}
/**
@@ -262,7 +273,7 @@ bool strglobmatch(const char *str, const char *pat)
*/
bool strlazymatch(const char *str, const char *pat)
{
- return __match_glob(str, pat, true);
+ return __match_glob(str, pat, true, false);
}
/**
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index aecff69a510d..df2482b2ba45 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1962,7 +1962,7 @@ static bool symbol__read_kptr_restrict(void)
char line[8];
if (fgets(line, sizeof(line), fp) != NULL)
- value = (geteuid() != 0) ?
+ value = ((geteuid() != 0) || (getuid() != 0)) ?
(atoi(line) != 0) :
(atoi(line) == 2);
@@ -2032,6 +2032,10 @@ int symbol__init(struct perf_env *env)
symbol_conf.sym_list_str, "symbol") < 0)
goto out_free_tid_list;
+ if (setup_list(&symbol_conf.bt_stop_list,
+ symbol_conf.bt_stop_list_str, "symbol") < 0)
+ goto out_free_sym_list;
+
/*
* A path to symbols of "/" is identical to ""
* reset here for simplicity.
@@ -2049,6 +2053,8 @@ int symbol__init(struct perf_env *env)
symbol_conf.initialized = true;
return 0;
+out_free_sym_list:
+ strlist__delete(symbol_conf.sym_list);
out_free_tid_list:
intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
@@ -2064,6 +2070,7 @@ void symbol__exit(void)
{
if (!symbol_conf.initialized)
return;
+ strlist__delete(symbol_conf.bt_stop_list);
strlist__delete(symbol_conf.sym_list);
strlist__delete(symbol_conf.dso_list);
strlist__delete(symbol_conf.comm_list);
@@ -2071,6 +2078,7 @@ void symbol__exit(void)
intlist__delete(symbol_conf.pid_list);
vmlinux_path__exit();
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
+ symbol_conf.bt_stop_list = NULL;
symbol_conf.initialized = false;
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index d964844eb314..6c358b7ed336 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -58,6 +58,7 @@ struct symbol {
u16 namelen;
u8 binding;
u8 idle:1;
+ u8 ignore:1;
u8 arch_sym;
char name[0];
};
@@ -100,6 +101,7 @@ struct symbol_conf {
show_total_period,
use_callchain,
cumulate_callchain,
+ show_branchflag_count,
exclude_other,
show_cpu_utilization,
initialized,
@@ -130,14 +132,16 @@ struct symbol_conf {
*pid_list_str,
*tid_list_str,
*sym_list_str,
- *col_width_list_str;
+ *col_width_list_str,
+ *bt_stop_list_str;
struct strlist *dso_list,
*comm_list,
*sym_list,
*dso_from_list,
*dso_to_list,
*sym_from_list,
- *sym_to_list;
+ *sym_to_list,
+ *bt_stop_list;
struct intlist *pid_list,
*tid_list;
const char *symfs;
@@ -281,7 +285,8 @@ int symbol__annotation_init(void);
struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name);
size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
const struct addr_location *al,
- bool unknown_as_addr, FILE *fp);
+ bool unknown_as_addr,
+ bool print_offsets, FILE *fp);
size_t symbol__fprintf_symname_offs(const struct symbol *sym,
const struct addr_location *al, FILE *fp);
size_t __symbol__fprintf_symname(const struct symbol *sym,
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index a680bdaa65dc..7c6b33e8e2d2 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -15,14 +15,15 @@ size_t symbol__fprintf(struct symbol *sym, FILE *fp)
size_t __symbol__fprintf_symname_offs(const struct symbol *sym,
const struct addr_location *al,
- bool unknown_as_addr, FILE *fp)
+ bool unknown_as_addr,
+ bool print_offsets, FILE *fp)
{
unsigned long offset;
size_t length;
if (sym && sym->name) {
length = fprintf(fp, "%s", sym->name);
- if (al) {
+ if (al && print_offsets) {
if (al->addr < sym->end)
offset = al->addr - sym->start;
else
@@ -40,19 +41,19 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym,
const struct addr_location *al,
FILE *fp)
{
- return __symbol__fprintf_symname_offs(sym, al, false, fp);
+ return __symbol__fprintf_symname_offs(sym, al, false, true, fp);
}
size_t __symbol__fprintf_symname(const struct symbol *sym,
const struct addr_location *al,
bool unknown_as_addr, FILE *fp)
{
- return __symbol__fprintf_symname_offs(sym, al, unknown_as_addr, fp);
+ return __symbol__fprintf_symname_offs(sym, al, unknown_as_addr, false, fp);
}
size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp)
{
- return __symbol__fprintf_symname_offs(sym, NULL, false, fp);
+ return __symbol__fprintf_symname_offs(sym, NULL, false, false, fp);
}
size_t dso__fprintf_symbols_by_name(struct dso *dso,
diff --git a/tools/perf/util/time-utils.c b/tools/perf/util/time-utils.c
new file mode 100644
index 000000000000..d1b21c72206d
--- /dev/null
+++ b/tools/perf/util/time-utils.c
@@ -0,0 +1,119 @@
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+#include <linux/time64.h>
+#include <time.h>
+#include <errno.h>
+#include <inttypes.h>
+
+#include "perf.h"
+#include "debug.h"
+#include "time-utils.h"
+
+int parse_nsec_time(const char *str, u64 *ptime)
+{
+ u64 time_sec, time_nsec;
+ char *end;
+
+ time_sec = strtoul(str, &end, 10);
+ if (*end != '.' && *end != '\0')
+ return -1;
+
+ if (*end == '.') {
+ int i;
+ char nsec_buf[10];
+
+ if (strlen(++end) > 9)
+ return -1;
+
+ strncpy(nsec_buf, end, 9);
+ nsec_buf[9] = '\0';
+
+ /* make it nsec precision */
+ for (i = strlen(nsec_buf); i < 9; i++)
+ nsec_buf[i] = '0';
+
+ time_nsec = strtoul(nsec_buf, &end, 10);
+ if (*end != '\0')
+ return -1;
+ } else
+ time_nsec = 0;
+
+ *ptime = time_sec * NSEC_PER_SEC + time_nsec;
+ return 0;
+}
+
+static int parse_timestr_sec_nsec(struct perf_time_interval *ptime,
+ char *start_str, char *end_str)
+{
+ if (start_str && (*start_str != '\0') &&
+ (parse_nsec_time(start_str, &ptime->start) != 0)) {
+ return -1;
+ }
+
+ if (end_str && (*end_str != '\0') &&
+ (parse_nsec_time(end_str, &ptime->end) != 0)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr)
+{
+ char *start_str, *end_str;
+ char *d, *str;
+ int rc = 0;
+
+ if (ostr == NULL || *ostr == '\0')
+ return 0;
+
+ /* copy original string because we need to modify it */
+ str = strdup(ostr);
+ if (str == NULL)
+ return -ENOMEM;
+
+ ptime->start = 0;
+ ptime->end = 0;
+
+ /* str has the format: <start>,<stop>
+ * variations: <start>,
+ * ,<stop>
+ * ,
+ */
+ start_str = str;
+ d = strchr(start_str, ',');
+ if (d) {
+ *d = '\0';
+ ++d;
+ }
+ end_str = d;
+
+ rc = parse_timestr_sec_nsec(ptime, start_str, end_str);
+
+ free(str);
+
+ /* make sure end time is after start time if it was given */
+ if (rc == 0 && ptime->end && ptime->end < ptime->start)
+ return -EINVAL;
+
+ pr_debug("start time %" PRIu64 ", ", ptime->start);
+ pr_debug("end time %" PRIu64 "\n", ptime->end);
+
+ return rc;
+}
+
+bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp)
+{
+ /* if time is not set don't drop sample */
+ if (timestamp == 0)
+ return false;
+
+ /* otherwise compare sample time to time window */
+ if ((ptime->start && timestamp < ptime->start) ||
+ (ptime->end && timestamp > ptime->end)) {
+ return true;
+ }
+
+ return false;
+}
diff --git a/tools/perf/util/time-utils.h b/tools/perf/util/time-utils.h
new file mode 100644
index 000000000000..c1f197c4af6c
--- /dev/null
+++ b/tools/perf/util/time-utils.h
@@ -0,0 +1,14 @@
+#ifndef _TIME_UTILS_H_
+#define _TIME_UTILS_H_
+
+struct perf_time_interval {
+ u64 start, end;
+};
+
+int parse_nsec_time(const char *str, u64 *ptime);
+
+int perf_time__parse_str(struct perf_time_interval *ptime, const char *ostr);
+
+bool perf_time__skip_sample(struct perf_time_interval *ptime, u64 timestamp);
+
+#endif
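parse_nsec_time() moves out of util.c into these helpers, together with a simple "<start>,<stop>" range parser and a predicate for dropping samples outside the window. A minimal filtering sketch (times in nanoseconds, as elsewhere in perf):

#include <stdbool.h>
#include "util/time-utils.h"

/* Sketch: decide whether to drop a sample given a window string such as "10.5,11". */
static bool sample_out_of_window(const char *timestr, u64 sample_time)
{
        struct perf_time_interval ptime = { .start = 0, .end = 0 };

        if (perf_time__parse_str(&ptime, timestr))      /* 0 on success */
                return false;                           /* bad string: keep everything */

        return perf_time__skip_sample(&ptime, sample_time);
}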
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 9df61059a85d..0ac9077f62a2 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -25,6 +25,7 @@
#include <errno.h>
#include "../perf.h"
+#include "debug.h"
#include "util.h"
#include "trace-event.h"
@@ -86,16 +87,15 @@ struct scripting_ops python_scripting_unsupported_ops = {
static void register_python_scripting(struct scripting_ops *scripting_ops)
{
- int err;
- err = script_spec_register("Python", scripting_ops);
- if (err)
- die("error registering Python script extension");
-
- err = script_spec_register("py", scripting_ops);
- if (err)
- die("error registering py script extension");
-
- scripting_context = malloc(sizeof(struct scripting_context));
+ if (scripting_context == NULL)
+ scripting_context = malloc(sizeof(*scripting_context));
+
+ if (scripting_context == NULL ||
+ script_spec_register("Python", scripting_ops) ||
+ script_spec_register("py", scripting_ops)) {
+ pr_err("Error registering Python script extension: disabling it\n");
+ zfree(&scripting_context);
+ }
}
#ifdef NO_LIBPYTHON
@@ -150,16 +150,15 @@ struct scripting_ops perl_scripting_unsupported_ops = {
static void register_perl_scripting(struct scripting_ops *scripting_ops)
{
- int err;
- err = script_spec_register("Perl", scripting_ops);
- if (err)
- die("error registering Perl script extension");
-
- err = script_spec_register("pl", scripting_ops);
- if (err)
- die("error registering pl script extension");
-
- scripting_context = malloc(sizeof(struct scripting_context));
+ if (scripting_context == NULL)
+ scripting_context = malloc(sizeof(*scripting_context));
+
+ if (scripting_context == NULL ||
+ script_spec_register("Perl", scripting_ops) ||
+ script_spec_register("pl", scripting_ops)) {
+ pr_err("Error registering Perl script extension: disabling it\n");
+ zfree(&scripting_context);
+ }
}
#ifdef NO_LIBPERL
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 20c2e5743903..6fec84dff3f7 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -357,8 +357,8 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
di.format = UNW_INFO_FORMAT_REMOTE_TABLE;
di.start_ip = map->start;
di.end_ip = map->end;
- di.u.rti.segbase = map->start + segbase;
- di.u.rti.table_data = map->start + table_data;
+ di.u.rti.segbase = map->start + segbase - map->pgoff;
+ di.u.rti.table_data = map->start + table_data - map->pgoff;
di.u.rti.table_len = fde_count * sizeof(struct table_entry)
/ sizeof(unw_word_t);
ret = dwarf_search_unwind_table(as, ip, &di, pi,
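The one-line unwind fix matters for DSOs mapped with a non-zero file offset: segbase and table_data are file offsets taken from the ELF, so the runtime address is map->start - map->pgoff + offset. For example, with map->start = 0x7f0000002000, map->pgoff = 0x2000 and an .eh_frame_hdr at file offset 0x2040, the table now resolves to 0x7f0000002040 instead of the 0x7f0000004040 the old arithmetic produced.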
diff --git a/tools/perf/util/util-cxx.h b/tools/perf/util/util-cxx.h
new file mode 100644
index 000000000000..0e0e019c9f34
--- /dev/null
+++ b/tools/perf/util/util-cxx.h
@@ -0,0 +1,26 @@
+/*
+ * Support C++ source files that use utilities defined in util.h
+ */
+
+#ifndef PERF_UTIL_UTIL_CXX_H
+#define PERF_UTIL_UTIL_CXX_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Currently 'new' is the only C++ keyword found in util.h
+ * (via tools/include/linux/rbtree.h).
+ *
+ * Other keywords, like class and delete, should be
+ * redefined if necessary.
+ */
+#define new _new
+#include "util.h"
+#undef new
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 85c56800f17a..9ddd98827d12 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -400,37 +400,12 @@ void sighandler_dump_stack(int sig)
raise(sig);
}
-int parse_nsec_time(const char *str, u64 *ptime)
+int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz)
{
- u64 time_sec, time_nsec;
- char *end;
+ u64 sec = timestamp / NSEC_PER_SEC;
+ u64 usec = (timestamp % NSEC_PER_SEC) / NSEC_PER_USEC;
- time_sec = strtoul(str, &end, 10);
- if (*end != '.' && *end != '\0')
- return -1;
-
- if (*end == '.') {
- int i;
- char nsec_buf[10];
-
- if (strlen(++end) > 9)
- return -1;
-
- strncpy(nsec_buf, end, 9);
- nsec_buf[9] = '\0';
-
- /* make it nsec precision */
- for (i = strlen(nsec_buf); i < 9; i++)
- nsec_buf[i] = '0';
-
- time_nsec = strtoul(nsec_buf, &end, 10);
- if (*end != '\0')
- return -1;
- } else
- time_nsec = 0;
-
- *ptime = time_sec * NSEC_PER_SEC + time_nsec;
- return 0;
+ return scnprintf(buf, sz, "%"PRIu64".%06"PRIu64, sec, usec);
}
unsigned long parse_tag_value(const char *str, struct parse_tag *tags)
@@ -629,12 +604,63 @@ bool find_process(const char *name)
return ret ? false : true;
}
+static int
+fetch_ubuntu_kernel_version(unsigned int *puint)
+{
+ ssize_t len;
+ size_t line_len = 0;
+ char *ptr, *line = NULL;
+ int version, patchlevel, sublevel, err;
+ FILE *vsig = fopen("/proc/version_signature", "r");
+
+ if (!vsig) {
+ pr_debug("Open /proc/version_signature failed: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ len = getline(&line, &line_len, vsig);
+ fclose(vsig);
+ err = -1;
+ if (len <= 0) {
+ pr_debug("Reading from /proc/version_signature failed: %s\n",
+ strerror(errno));
+ goto errout;
+ }
+
+ ptr = strrchr(line, ' ');
+ if (!ptr) {
+ pr_debug("Parsing /proc/version_signature failed: %s\n", line);
+ goto errout;
+ }
+
+ err = sscanf(ptr + 1, "%d.%d.%d",
+ &version, &patchlevel, &sublevel);
+ if (err != 3) {
+ pr_debug("Unable to get kernel version from /proc/version_signature '%s'\n",
+ line);
+ goto errout;
+ }
+
+ if (puint)
+ *puint = (version << 16) + (patchlevel << 8) + sublevel;
+ err = 0;
+errout:
+ free(line);
+ return err;
+}
+
int
fetch_kernel_version(unsigned int *puint, char *str,
size_t str_size)
{
struct utsname utsname;
int version, patchlevel, sublevel, err;
+ bool int_ver_ready = false;
+
+ if (access("/proc/version_signature", R_OK) == 0)
+ if (!fetch_ubuntu_kernel_version(puint))
+ int_ver_ready = true;
if (uname(&utsname))
return -1;
@@ -648,12 +674,12 @@ fetch_kernel_version(unsigned int *puint, char *str,
&version, &patchlevel, &sublevel);
if (err != 3) {
- pr_debug("Unablt to get kernel version from uname '%s'\n",
+ pr_debug("Unable to get kernel version from uname '%s'\n",
utsname.release);
return -1;
}
- if (puint)
+ if (puint && !int_ver_ready)
*puint = (version << 16) + (patchlevel << 8) + sublevel;
return 0;
}
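On Ubuntu kernels, uname -r reports the packaged release while /proc/version_signature ends with the upstream version the backport tracks, so fetch_ubuntu_kernel_version() is preferred for the integer form. The parts are packed as (version << 16) + (patchlevel << 8) + sublevel; for example 4.8.5 encodes to (4 << 16) + (8 << 8) + 5 = 0x040805 = 264197.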
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 43899e0d6fa1..1d639e38aa82 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -179,8 +179,6 @@ static inline void *zalloc(size_t size)
#undef tolower
#undef toupper
-int parse_nsec_time(const char *str, u64 *ptime);
-
extern unsigned char sane_ctype[256];
#define GIT_SPACE 0x01
#define GIT_DIGIT 0x02
@@ -222,6 +220,7 @@ s64 perf_atoll(const char *str);
char **argv_split(const char *str, int *argcp);
void argv_free(char **argv);
bool strglobmatch(const char *str, const char *pat);
+bool strglobmatch_nocase(const char *str, const char *pat);
bool strlazymatch(const char *str, const char *pat);
static inline bool strisglob(const char *str)
{
@@ -361,4 +360,7 @@ extern int sched_getcpu(void);
#endif
int is_printable_array(char *p, unsigned int len);
+
+int timestamp__scnprintf_usec(u64 timestamp, char *buf, size_t sz);
+
#endif /* GIT_COMPAT_UTIL_H */
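In util.c the old parse_nsec_time() body (now in time-utils.c) makes way for timestamp__scnprintf_usec(), a single helper for rendering a nanosecond timestamp as seconds.microseconds. A tiny sketch:

#include <stdio.h>
#include "util/util.h"

/* Sketch: render a perf timestamp (nanoseconds) as seconds.microseconds. */
static void print_timestamp(u64 ts)
{
        char buf[32];

        timestamp__scnprintf_usec(ts, buf, sizeof(buf));
        printf("%s\n", buf);            /* e.g. 1234000567 ns -> "1.234000" */
}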
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index 0fb3c1fcd3e6..5074be4ed467 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -2,15 +2,18 @@
#include "util.h"
#include "values.h"
+#include "debug.h"
-void perf_read_values_init(struct perf_read_values *values)
+int perf_read_values_init(struct perf_read_values *values)
{
values->threads_max = 16;
values->pid = malloc(values->threads_max * sizeof(*values->pid));
values->tid = malloc(values->threads_max * sizeof(*values->tid));
values->value = malloc(values->threads_max * sizeof(*values->value));
- if (!values->pid || !values->tid || !values->value)
- die("failed to allocate read_values threads arrays");
+ if (!values->pid || !values->tid || !values->value) {
+ pr_debug("failed to allocate read_values threads arrays");
+ goto out_free_pid;
+ }
values->threads = 0;
values->counters_max = 16;
@@ -18,9 +21,22 @@ void perf_read_values_init(struct perf_read_values *values)
* sizeof(*values->counterrawid));
values->countername = malloc(values->counters_max
* sizeof(*values->countername));
- if (!values->counterrawid || !values->countername)
- die("failed to allocate read_values counters arrays");
+ if (!values->counterrawid || !values->countername) {
+ pr_debug("failed to allocate read_values counters arrays");
+ goto out_free_counter;
+ }
values->counters = 0;
+
+ return 0;
+
+out_free_counter:
+ zfree(&values->counterrawid);
+ zfree(&values->countername);
+out_free_pid:
+ zfree(&values->pid);
+ zfree(&values->tid);
+ zfree(&values->value);
+ return -ENOMEM;
}
void perf_read_values_destroy(struct perf_read_values *values)
@@ -41,17 +57,27 @@ void perf_read_values_destroy(struct perf_read_values *values)
zfree(&values->countername);
}
-static void perf_read_values__enlarge_threads(struct perf_read_values *values)
+static int perf_read_values__enlarge_threads(struct perf_read_values *values)
{
- values->threads_max *= 2;
- values->pid = realloc(values->pid,
- values->threads_max * sizeof(*values->pid));
- values->tid = realloc(values->tid,
- values->threads_max * sizeof(*values->tid));
- values->value = realloc(values->value,
- values->threads_max * sizeof(*values->value));
- if (!values->pid || !values->tid || !values->value)
- die("failed to enlarge read_values threads arrays");
+ int nthreads_max = values->threads_max * 2;
+ void *npid = realloc(values->pid, nthreads_max * sizeof(*values->pid)),
+ *ntid = realloc(values->tid, nthreads_max * sizeof(*values->tid)),
+ *nvalue = realloc(values->value, nthreads_max * sizeof(*values->value));
+
+ if (!npid || !ntid || !nvalue)
+ goto out_err;
+
+ values->threads_max = nthreads_max;
+ values->pid = npid;
+ values->tid = ntid;
+ values->value = nvalue;
+ return 0;
+out_err:
+ free(npid);
+ free(ntid);
+ free(nvalue);
+ pr_debug("failed to enlarge read_values threads arrays");
+ return -ENOMEM;
}
static int perf_read_values__findnew_thread(struct perf_read_values *values,
@@ -63,15 +89,21 @@ static int perf_read_values__findnew_thread(struct perf_read_values *values,
if (values->pid[i] == pid && values->tid[i] == tid)
return i;
- if (values->threads == values->threads_max)
- perf_read_values__enlarge_threads(values);
+ if (values->threads == values->threads_max) {
+ i = perf_read_values__enlarge_threads(values);
+ if (i < 0)
+ return i;
+ }
- i = values->threads++;
+	i = values->threads;
+ values->value[i] = malloc(values->counters_max * sizeof(**values->value));
+ if (!values->value[i]) {
+ pr_debug("failed to allocate read_values counters array");
+ return -ENOMEM;
+ }
values->pid[i] = pid;
values->tid[i] = tid;
- values->value[i] = malloc(values->counters_max * sizeof(**values->value));
- if (!values->value[i])
- die("failed to allocate read_values counters array");
+	values->threads = i + 1;
return i;
}
@@ -115,16 +147,21 @@ static int perf_read_values__findnew_counter(struct perf_read_values *values,
return i;
}
-void perf_read_values_add_value(struct perf_read_values *values,
+int perf_read_values_add_value(struct perf_read_values *values,
u32 pid, u32 tid,
u64 rawid, const char *name, u64 value)
{
int tindex, cindex;
tindex = perf_read_values__findnew_thread(values, pid, tid);
+ if (tindex < 0)
+ return tindex;
cindex = perf_read_values__findnew_counter(values, rawid, name);
+ if (cindex < 0)
+ return cindex;
values->value[tindex][cindex] = value;
+ return 0;
}
static void perf_read_values__display_pretty(FILE *fp,
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h
index b21a80c6cf8d..808ff9c73bf5 100644
--- a/tools/perf/util/values.h
+++ b/tools/perf/util/values.h
@@ -14,10 +14,10 @@ struct perf_read_values {
u64 **value;
};
-void perf_read_values_init(struct perf_read_values *values);
+int perf_read_values_init(struct perf_read_values *values);
void perf_read_values_destroy(struct perf_read_values *values);
-void perf_read_values_add_value(struct perf_read_values *values,
+int perf_read_values_add_value(struct perf_read_values *values,
u32 pid, u32 tid,
u64 rawid, const char *name, u64 value);
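The values API no longer die()s on allocation failure: perf_read_values_init() and perf_read_values_add_value() now return negative errno values that callers have to check. A minimal sketch of the updated contract:

#include "util/values.h"

/* Sketch: error-checked use of the read_values API after the die() removal. */
static int record_one(struct perf_read_values *values,
                      u32 pid, u32 tid, u64 rawid, u64 val)
{
        if (perf_read_values_add_value(values, pid, tid,
                                       rawid, "cycles", val) < 0)
                return -1;      /* -ENOMEM while growing the thread/counter arrays */
        return 0;
}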
diff --git a/tools/testing/selftests/rcutorture/.gitignore b/tools/testing/selftests/rcutorture/.gitignore
index 05838f6f2ebe..ccc240275d1c 100644
--- a/tools/testing/selftests/rcutorture/.gitignore
+++ b/tools/testing/selftests/rcutorture/.gitignore
@@ -1,6 +1,4 @@
initrd
-linux-2.6
b[0-9]*
-rcu-test-image
res
*.swp
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 0aed965f0062..3b3c1b693ee1 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -303,6 +303,7 @@ then
fi
___EOF___
awk < $T/cfgcpu.pack \
+ -v TORTURE_BUILDONLY="$TORTURE_BUILDONLY" \
-v CONFIGDIR="$CONFIGFRAG/" \
-v KVM="$KVM" \
-v ncpus=$cpus \
@@ -375,6 +376,10 @@ function dump(first, pastlast, batchnum)
njitter = ncpus;
else
njitter = ja[1];
+ if (TORTURE_BUILDONLY && njitter != 0) {
+ njitter = 0;
+ print "echo Build-only run, so suppressing jitter >> " rd "/log"
+ }
for (j = 0; j < njitter; j++)
print "jitter.sh " j " " dur " " ja[2] " " ja[3] "&"
print "wait"
diff --git a/tools/testing/selftests/timers/skew_consistency.c b/tools/testing/selftests/timers/skew_consistency.c
index 5562f84ee07c..2a996e072259 100644
--- a/tools/testing/selftests/timers/skew_consistency.c
+++ b/tools/testing/selftests/timers/skew_consistency.c
@@ -57,7 +57,7 @@ int main(int argv, char **argc)
pid_t pid;
- printf("Running Asyncrhonous Frequency Changing Tests...\n");
+ printf("Running Asynchronous Frequency Changing Tests...\n");
pid = fork();
if (!pid)
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index a89f80a5b711..8c1cb423cfe6 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -6,7 +6,7 @@ include ../lib.mk
TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
check_initial_reg_state sigreturn ldt_gdt iopl \
- protection_keys
+ protection_keys test_vdso
TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
vdso_restorer
diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
new file mode 100644
index 000000000000..65d7a2bf7e14
--- /dev/null
+++ b/tools/testing/selftests/x86/test_vdso.c
@@ -0,0 +1,123 @@
+/*
+ * test_vdso.c - Test cases for the vDSO and vsyscall getcpu()
+ * Copyright (c) 2011-2015 Andrew Lutomirski
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <dlfcn.h>
+#include <string.h>
+#include <errno.h>
+#include <sched.h>
+#include <stdbool.h>
+
+#ifndef SYS_getcpu
+# ifdef __x86_64__
+# define SYS_getcpu 309
+# else
+# define SYS_getcpu 318
+# endif
+#endif
+
+int nerrs = 0;
+
+#ifdef __x86_64__
+# define VSYS(x) (x)
+#else
+# define VSYS(x) 0
+#endif
+
+typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
+
+const getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800);
+getcpu_t vdso_getcpu;
+
+void fill_function_pointers()
+{
+ void *vdso = dlopen("linux-vdso.so.1",
+ RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+ if (!vdso)
+ vdso = dlopen("linux-gate.so.1",
+ RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
+ if (!vdso) {
+ printf("[WARN]\tfailed to find vDSO\n");
+ return;
+ }
+
+ vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu");
+ if (!vdso_getcpu)
+ printf("Warning: failed to find getcpu in vDSO\n");
+}
+
+static long sys_getcpu(unsigned * cpu, unsigned * node,
+ void* cache)
+{
+ return syscall(__NR_getcpu, cpu, node, cache);
+}
+
+static void test_getcpu(void)
+{
+ printf("[RUN]\tTesting getcpu...\n");
+
+ for (int cpu = 0; ; cpu++) {
+ cpu_set_t cpuset;
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
+ return;
+
+ unsigned cpu_sys, cpu_vdso, cpu_vsys,
+ node_sys, node_vdso, node_vsys;
+ long ret_sys, ret_vdso = 1, ret_vsys = 1;
+ unsigned node;
+
+ ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0);
+ if (vdso_getcpu)
+ ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0);
+ if (vgetcpu)
+ ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0);
+
+ if (!ret_sys)
+ node = node_sys;
+ else if (!ret_vdso)
+ node = node_vdso;
+ else if (!ret_vsys)
+ node = node_vsys;
+
+ bool ok = true;
+ if (!ret_sys && (cpu_sys != cpu || node_sys != node))
+ ok = false;
+ if (!ret_vdso && (cpu_vdso != cpu || node_vdso != node))
+ ok = false;
+ if (!ret_vsys && (cpu_vsys != cpu || node_vsys != node))
+ ok = false;
+
+ printf("[%s]\tCPU %u:", ok ? "OK" : "FAIL", cpu);
+ if (!ret_sys)
+ printf(" syscall: cpu %u, node %u", cpu_sys, node_sys);
+ if (!ret_vdso)
+ printf(" vdso: cpu %u, node %u", cpu_vdso, node_vdso);
+ if (!ret_vsys)
+ printf(" vsyscall: cpu %u, node %u", cpu_vsys,
+ node_vsys);
+ printf("\n");
+
+ if (!ok)
+ nerrs++;
+ }
+}
+
+int main(int argc, char **argv)
+{
+ fill_function_pointers();
+
+ test_getcpu();
+
+ return nerrs ? 1 : 0;
+}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7f9ee2929cfe..c55f5d6a7767 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1972,30 +1972,38 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
-int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- void *data, unsigned long len)
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, int offset, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
+ gpa_t gpa = ghc->gpa + offset;
- BUG_ON(len > ghc->len);
+ BUG_ON(len + offset > ghc->len);
if (slots->generation != ghc->generation)
kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
if (unlikely(!ghc->memslot))
- return kvm_write_guest(kvm, ghc->gpa, data, len);
+ return kvm_write_guest(kvm, gpa, data, len);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
- r = __copy_to_user((void __user *)ghc->hva, data, len);
+ r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
if (r)
return -EFAULT;
- mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+ mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
return 0;
}
+EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
+
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len)
+{
+ return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
+}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,