author     Kent Overstreet <kent.overstreet@gmail.com>  2021-07-13 16:31:40 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>  2021-07-14 20:31:38 -0400
commit     86bd5c622c93509e204b6fb5de6910879fc87a70
tree       5fea647ae71ccfe276da6de85d70e6b70c1af45a  /libbcachefs/varint.c
parent     700d013b5280b72a1fb3830d8f70ecce5decb0ab
Update bcachefs sources to 787de128a5 bcachefs: Improvements to fsck check_dirents()
Diffstat (limited to 'libbcachefs/varint.c')
-rw-r--r--  libbcachefs/varint.c | 73
1 file changed, 72 insertions(+), 1 deletion(-)
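For readers skimming the diff below: the format this patch works with is a prefix varint. The encoder packs the length into the low bits of the first byte as (bytes - 1) one bits followed by a zero bit, shifts the value up by that many bits, and stores the result little-endian; values that would need 9 bytes get a 0xff marker byte followed by the raw 64-bit value. The decoder recovers the length from the position of the first zero bit in the first byte. Below is a minimal standalone sketch of that scheme, not part of the commit; it deliberately skips the 9-byte escape case, and sketch_encode()/sketch_decode() are hypothetical names used only for illustration.

/* Standalone illustration of the prefix-varint layout described above,
 * without the 9-byte (0xff marker) escape case.
 */
#include <stdint.h>
#include <stdio.h>

static int sketch_encode(uint8_t *out, uint64_t v)
{
	unsigned bits  = 64 - __builtin_clzll(v | 1);	/* like fls64(v|1) */
	unsigned bytes = (bits + 6) / 7;		/* DIV_ROUND_UP(bits, 7) */

	if (bytes >= 9)
		return -1;	/* sketch only: real code emits 0xff + raw u64 */

	/* length tag: (bytes - 1) one bits, then a zero bit, in the low bits */
	v = (v << bytes) | (((uint64_t)1 << (bytes - 1)) - 1);
	for (unsigned i = 0; i < bytes; i++)		/* little-endian store */
		out[i] = v >> (8 * i);
	return bytes;
}

static int sketch_decode(const uint8_t *in, uint64_t *out)
{
	/* ffz(in[0]) + 1: position of the first zero bit gives the length;
	 * in[0] is promoted to int, so ~in[0] is never zero here. */
	unsigned bytes = __builtin_ctz(~in[0]) + 1;
	uint64_t v = 0;

	if (bytes >= 9)
		return -1;	/* sketch only: real code reads a raw u64 */

	for (unsigned i = 0; i < bytes; i++)		/* little-endian load */
		v |= (uint64_t)in[i] << (8 * i);
	*out = v >> bytes;
	return bytes;
}

int main(void)
{
	uint8_t buf[8];
	uint64_t v;
	int n = sketch_encode(buf, 300);	/* 300 -> 2 bytes: 0xb1 0x04 */

	sketch_decode(buf, &v);
	printf("%d bytes, decoded back to %llu\n", n, (unsigned long long)v);
	return 0;
}

With this layout a value below 2^7 takes one byte, below 2^14 two bytes, and so on. The _fast variants added by the patch rely on the caller guaranteeing 8 bytes of slack so that encode and decode can each use a single unconditional 8-byte store or load.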
diff --git a/libbcachefs/varint.c b/libbcachefs/varint.c
index a3d252c7..e6a04154 100644
--- a/libbcachefs/varint.c
+++ b/libbcachefs/varint.c
@@ -1,10 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
#include "varint.h"
+/**
+ * bch2_varint_encode - encode a variable length integer
+ * @out - destination to encode to
+ * @v - unsigned integer to encode
+ *
+ * Returns the size in bytes of the encoded integer - at most 9 bytes
+ */
int bch2_varint_encode(u8 *out, u64 v)
{
unsigned bits = fls64(v|1);
@@ -13,17 +21,80 @@ int bch2_varint_encode(u8 *out, u64 v)
if (likely(bytes < 9)) {
v <<= bytes;
v |= ~(~0 << (bytes - 1));
+ v = cpu_to_le64(v);
+ memcpy(out, &v, bytes);
} else {
*out++ = 255;
bytes = 9;
+ put_unaligned_le64(v, out);
}
- put_unaligned_le64(v, out);
return bytes;
}
+/**
+ * bch2_varint_decode - decode a variable length integer
+ * @in - varint to decode
+ * @end - end of buffer to decode from
+ * @out - on success, decoded integer
+ *
+ * Returns the size in bytes of the decoded integer - or -1 on failure (would
+ * have read past the end of the buffer)
+ */
int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
{
+ unsigned bytes = likely(in < end)
+ ? ffz(*in & 255) + 1
+ : 1;
+ u64 v;
+
+ if (unlikely(in + bytes > end))
+ return -1;
+
+ if (likely(bytes < 9)) {
+ v = 0;
+ memcpy(&v, in, bytes);
+ v = le64_to_cpu(v);
+ v >>= bytes;
+ } else {
+ v = get_unaligned_le64(++in);
+ }
+
+ *out = v;
+ return bytes;
+}
+
+/**
+ * bch2_varint_encode_fast - fast version of bch2_varint_encode
+ *
+ * This version assumes it's always safe to write 8 bytes to @out, even if the
+ * encoded integer would be smaller.
+ */
+int bch2_varint_encode_fast(u8 *out, u64 v)
+{
+ unsigned bits = fls64(v|1);
+ unsigned bytes = DIV_ROUND_UP(bits, 7);
+
+ if (likely(bytes < 9)) {
+ v <<= bytes;
+ v |= ~(~0 << (bytes - 1));
+ } else {
+ *out++ = 255;
+ bytes = 9;
+ }
+
+ put_unaligned_le64(v, out);
+ return bytes;
+}
+
+/**
+ * bch2_varint_decode_fast - fast version of bch2_varint_decode
+ *
+ * This version assumes that it is safe to read at most 8 bytes past the end of
+ * @end (we still return an error if the varint extends past @end).
+ */
+int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out)
+{
u64 v = get_unaligned_le64(in);
unsigned bytes = ffz(v & 255) + 1;