summaryrefslogtreecommitdiff
path: root/tools/lib/bpf/libbpf_util.h
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2019-04-16 20:13:10 -0700
committerAlexei Starovoitov <ast@kernel.org>2019-04-16 20:13:11 -0700
commit00967e84f742f87603e769529628e32076ade188 (patch)
tree851eb8e92dd057e389e6a7b733dbda56f8a1d666 /tools/lib/bpf/libbpf_util.h
parentd1b7725dfea3e6c1aff2ee94baf3658aba450fbe (diff)
parent2c5935f1b2b642cee8e1562396ec8a7781fc4c6d (diff)
Merge branch 'af_xdp-smp_mb-fixes'
Magnus Karlsson says: ==================== This patch set fixes one bug and removes two dependencies on Linux kernel headers from the XDP socket code in libbpf. A number of people have pointed out that these two dependencies make it hard to build the XDP socket part of libbpf without any kernel header dependencies. The two removed dependencies are: * Remove the usage of likely and unlikely (compiler.h) in xsk.h. It has been reported that the use of these actually decreases the performance of the ring access code due to an increase in instruction cache misses, so let us just remove these. * Remove the dependency on barrier.h as it brings in a lot of kernel headers. As the XDP socket code only uses two simple functions from it, we can reimplement these. As a bonus, the new implementation is faster as it uses the same barrier primitives as the kernel does when the same code is compiled there. Without this patch, the user land code uses lfence and sfence on x86, which are unnecessarily harsh/thorough. In the process of removing these dependencies a missing barrier function for at least PPC64 was discovered. For a full explanation on the missing barrier, please refer to patch 1. So the patch set now starts with two patches fixing this. I have also added a patch at the end removing this full memory barrier for x86 only, as it is not needed there. Structure of the patch set: Patch 1-2: Adds the missing barrier function in kernel and user space. Patch 3-4: Removes the dependencies Patch 5: Optimizes the added barrier from patch 2 so that it does not do unnecessary work on x86. v2 -> v3: * Added missing memory barrier in ring code * Added an explanation on the three barriers we use in the code * Moved barrier functions from xsk.h to libbpf_util.h * Added comment on why we have these functions in libbpf_util.h * Added a new barrier function in user space that makes it possible to remove the full memory barrier on x86. 
v1 -> v2: * Added comment about validity of ARM 32-bit barriers. Only armv7 and above. /Magnus ==================== Acked-by: Song Liu <songliubraving@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools/lib/bpf/libbpf_util.h')
-rw-r--r--tools/lib/bpf/libbpf_util.h30
1 files changed, 30 insertions, 0 deletions
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h
index 81ecda0cb9c9..172b707e007b 100644
--- a/tools/lib/bpf/libbpf_util.h
+++ b/tools/lib/bpf/libbpf_util.h
@@ -23,6 +23,36 @@ do { \
#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+/* Use these barrier functions instead of smp_[rw]mb() when they are
+ * used in a libbpf header file. That way they can be built into the
+ * application that uses libbpf.
+ */
+#if defined(__i386__) || defined(__x86_64__)
+# define libbpf_smp_rmb() asm volatile("" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("" : : : "memory")
+# define libbpf_smp_mb() \
+ asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
+/* Hinders stores to be observed before older loads. */
+# define libbpf_smp_rwmb() asm volatile("" : : : "memory")
+#elif defined(__aarch64__)
+# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
+# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_rwmb() libbpf_smp_mb()
+#elif defined(__arm__)
+/* These are only valid for armv7 and above */
+# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
+# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_rwmb() libbpf_smp_mb()
+#else
+# warning Architecture missing native barrier functions in libbpf_util.h.
+# define libbpf_smp_rmb() __sync_synchronize()
+# define libbpf_smp_wmb() __sync_synchronize()
+# define libbpf_smp_mb() __sync_synchronize()
+# define libbpf_smp_rwmb() __sync_synchronize()
+#endif
+
#ifdef __cplusplus
} /* extern "C" */
#endif