author    Kent Overstreet <kent.overstreet@linux.dev>  2023-05-25 17:52:28 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>  2023-05-25 22:25:34 -0400
commit    1f78fed4693a5361f56508daac59bebd5b556379
tree      267c710018040b6caa9193a1ee34e514317709c4
parent    b8b8dcfaed641eabeec8ba070e1e23665bc4ceb2
Update bcachefs sources to 31c09369cd six locks: Fix an uninitialized var
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'libbcachefs')
-rw-r--r--  libbcachefs/alloc_background.c      |  6
-rw-r--r--  libbcachefs/bkey.c                  |  2
-rw-r--r--  libbcachefs/bkey.h                  |  8
-rw-r--r--  libbcachefs/btree_cache.c           | 21
-rw-r--r--  libbcachefs/btree_io.c              |  2
-rw-r--r--  libbcachefs/btree_iter.c            |  3
-rw-r--r--  libbcachefs/btree_iter.h            |  9
-rw-r--r--  libbcachefs/btree_key_cache.c       | 19
-rw-r--r--  libbcachefs/btree_locking.c         | 15
-rw-r--r--  libbcachefs/btree_locking.h         | 12
-rw-r--r--  libbcachefs/btree_update_interior.c |  2
-rw-r--r--  libbcachefs/buckets.c               | 16
-rw-r--r--  libbcachefs/buckets.h               | 18
-rw-r--r--  libbcachefs/trace.h                 |  8
-rw-r--r--  libbcachefs/util.c                  | 17
15 files changed, 81 insertions, 77 deletions
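
Most of this update tracks an interface change in the six locks library: callers no longer read the lock sequence out of the lock's internal state (b->c.lock.state.seq) but go through a six_lock_seq() accessor. A minimal stand-in sketch of the accessor side follows; the struct layout here is illustrative only, the real struct six_lock is more involved:

```c
#include <stdint.h>
#include <stdio.h>

/* stand-in, not the kernel's struct six_lock */
struct six_lock {
	uint32_t seq;	/* bumped once per write lock/unlock cycle */
};

/* new accessor: callers no longer reach into the lock's internals */
static inline uint32_t six_lock_seq(const struct six_lock *lock)
{
	return lock->seq;
}

int main(void)
{
	struct six_lock l = { .seq = 42 };

	/* old style (removed): u32 seq = b->c.lock.state.seq; */
	uint32_t seq = six_lock_seq(&l);

	printf("seq = %u\n", seq);
	return 0;
}
```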
diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index dcdef3bc..f774a660 100644
--- a/libbcachefs/alloc_background.c
+++ b/libbcachefs/alloc_background.c
@@ -269,9 +269,9 @@ int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
int rw = flags & WRITE;
- if (alloc_v4_u64s(a.v) != bkey_val_u64s(k.k)) {
- prt_printf(err, "bad val size (%lu != %u)",
- bkey_val_u64s(k.k), alloc_v4_u64s(a.v));
+ if (alloc_v4_u64s(a.v) > bkey_val_u64s(k.k)) {
+ prt_printf(err, "bad val size (%u > %lu)",
+ alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
return -BCH_ERR_invalid_bkey;
}
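
The alloc_v4 validation above is relaxed from an exact-size check to a minimum-size check, and the error message's arguments are reordered to match the new comparison: values with trailing space beyond what alloc_v4_u64s() requires are now accepted. A toy sketch of the before/after predicate, with hypothetical names:

```c
#include <stdbool.h>

/* old: any size mismatch was invalid */
static bool val_size_ok_old(unsigned need_u64s, unsigned have_u64s)
{
	return need_u64s == have_u64s;
}

/* new: only a value too small to hold the unpacked struct is invalid;
 * trailing padding is tolerated */
static bool val_size_ok_new(unsigned need_u64s, unsigned have_u64s)
{
	return need_u64s <= have_u64s;
}
```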
diff --git a/libbcachefs/bkey.c b/libbcachefs/bkey.c
index b58b876f..ee7ba700 100644
--- a/libbcachefs/bkey.c
+++ b/libbcachefs/bkey.c
@@ -724,7 +724,7 @@ unsigned bch2_bkey_ffs(const struct btree *b, const struct bkey_packed *k)
return 0;
}
-#ifdef CONFIG_X86_64
+#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
#define I(_x) (*(out)++ = (_x))
#define I1(i0) I(i0)
diff --git a/libbcachefs/bkey.h b/libbcachefs/bkey.h
index 727bed99..e81fb3e0 100644
--- a/libbcachefs/bkey.h
+++ b/libbcachefs/bkey.h
@@ -9,9 +9,17 @@
#include "util.h"
#include "vstructs.h"
+#if 0
+
+/*
+ * compiled unpack functions are disabled, pending a new interface for
+ * dynamically allocating executable memory:
+ */
+
#ifdef CONFIG_X86_64
#define HAVE_BCACHEFS_COMPILED_UNPACK 1
#endif
+#endif
void bch2_bkey_packed_to_binary_text(struct printbuf *,
const struct bkey_format *,
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index 73d32688..f8402709 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -62,10 +62,12 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
EBUG_ON(btree_node_write_in_flight(b));
+ clear_btree_node_just_written(b);
+
kvpfree(b->data, btree_bytes(c));
b->data = NULL;
#ifdef __KERNEL__
- vfree(b->aux_data);
+ kvfree(b->aux_data);
#else
munmap(b->aux_data, btree_aux_data_bytes(b));
#endif
@@ -100,7 +102,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
if (!b->data)
return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
#ifdef __KERNEL__
- b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
+ b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
#else
b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
PROT_READ|PROT_WRITE|PROT_EXEC,
@@ -126,7 +128,6 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
return NULL;
bkey_btree_ptr_init(&b->key);
- bch2_btree_lock_init(&b->c);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
lockdep_set_no_check_recursion(&b->c.lock.dep_map);
#endif
@@ -150,6 +151,8 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
return NULL;
}
+ bch2_btree_lock_init(&b->c, 0);
+
bc->used++;
list_add(&b->list, &bc->freeable);
return b;
@@ -484,7 +487,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
while (!list_empty(&bc->freed_nonpcpu)) {
b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
list_del(&b->list);
- six_lock_pcpu_free(&b->c.lock);
+ six_lock_exit(&b->c.lock);
kfree(b);
}
@@ -645,8 +648,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
mutex_lock(&bc->lock);
}
- if (pcpu_read_locks)
- six_lock_pcpu_alloc(&b->c.lock);
+ bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
BUG_ON(!six_trylock_intent(&b->c.lock));
BUG_ON(!six_trylock_write(&b->c.lock));
@@ -700,6 +702,7 @@ err:
/* Try to cannibalize another cached btree node: */
if (bc->alloc_lock == current) {
b2 = btree_node_cannibalize(c);
+ clear_btree_node_just_written(b2);
bch2_btree_node_hash_remove(bc, b2);
if (b) {
@@ -784,7 +787,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
set_btree_node_read_in_flight(b);
six_unlock_write(&b->c.lock);
- seq = b->c.lock.state.seq;
+ seq = six_lock_seq(&b->c.lock);
six_unlock_intent(&b->c.lock);
/* Unlock before doing IO: */
@@ -908,7 +911,7 @@ retry:
}
if (unlikely(btree_node_read_in_flight(b))) {
- u32 seq = b->c.lock.state.seq;
+ u32 seq = six_lock_seq(&b->c.lock);
six_unlock_type(&b->c.lock, lock_type);
bch2_trans_unlock(trans);
@@ -1006,7 +1009,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
}
if (unlikely(btree_node_read_in_flight(b))) {
- u32 seq = b->c.lock.state.seq;
+ u32 seq = six_lock_seq(&b->c.lock);
six_unlock_type(&b->c.lock, lock_type);
bch2_trans_unlock(trans);
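
With the compiled unpack functions disabled (the #if 0 in bkey.h above), aux_data no longer has to live in executable memory, so the kernel path can switch from vmalloc_exec() to plain kvmalloc(); note the userspace mmap() fallback still passes PROT_EXEC. A userspace sketch of the kvmalloc idea (try a contiguous heap allocation, fall back to page-granular mappings), assuming POSIX and a made-up size threshold:

```c
#include <stdlib.h>
#include <sys/mman.h>

/* sketch only: the real kvmalloc() tries kmalloc then falls back to
 * vmalloc inside the kernel */
static void *kvmalloc_like(size_t size)
{
	void *p;

	if (size <= 16 * 4096) {	/* arbitrary "small" threshold */
		p = malloc(size);
		if (p)
			return p;
	}

	/* no PROT_EXEC needed any more -- that is the point of the change */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}
```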
diff --git a/libbcachefs/btree_io.c b/libbcachefs/btree_io.c
index decbbaac..0a7a18ec 100644
--- a/libbcachefs/btree_io.c
+++ b/libbcachefs/btree_io.c
@@ -483,7 +483,7 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
struct btree_node_entry *bne;
bool reinit_iter = false;
- EBUG_ON(!(b->c.lock.state.seq & 1));
+ EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
BUG_ON(bset_written(b, bset(b, &b->set[1])));
BUG_ON(btree_node_just_written(b));
diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c
index 365794dc..4b9c04dc 100644
--- a/libbcachefs/btree_iter.c
+++ b/libbcachefs/btree_iter.c
@@ -652,9 +652,8 @@ void bch2_btree_path_level_init(struct btree_trans *trans,
BUG_ON(path->cached);
EBUG_ON(!btree_path_pos_in_node(path, b));
- EBUG_ON(b->c.lock.state.seq & 1);
- path->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
path->l[b->c.level].b = b;
__btree_path_level_init(path, b->c.level);
}
diff --git a/libbcachefs/btree_iter.h b/libbcachefs/btree_iter.h
index 02dd81a1..198e3815 100644
--- a/libbcachefs/btree_iter.h
+++ b/libbcachefs/btree_iter.h
@@ -42,14 +42,7 @@ static inline struct btree *btree_path_node(struct btree_path *path,
static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
const struct btree *b, unsigned level)
{
- /*
- * We don't compare the low bits of the lock sequence numbers because
- * @path might have taken a write lock on @b, and we don't want to skip
- * the linked path if the sequence numbers were equal before taking that
- * write lock. The lock sequence number is incremented by taking and
- * releasing write locks and is even when unlocked:
- */
- return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
+ return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}
static inline struct btree *btree_node_parent(struct btree_path *path,
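
As the removed comment explains, the sequence used to be bumped on both write lock and write unlock (odd while held), so relock checks compared seq >> 1. The changed assertions in btree_locking.h below suggest that under the new scheme taking a write lock leaves the sequence alone and unlocking bumps it once, which is why direct equality now works and linked paths advance their lock_seq by 1 rather than 2. A toy before/after of the relock check:

```c
#include <stdbool.h>
#include <stdint.h>

/* old scheme: seq odd while write locked, so ignore the low bit */
static bool relock_ok_old(uint32_t path_seq, uint32_t lock_seq)
{
	return path_seq >> 1 == lock_seq >> 1;
}

/* new scheme: seq ticks once per write cycle; compare directly */
static bool relock_ok_new(uint32_t path_seq, uint32_t lock_seq)
{
	return path_seq == lock_seq;
}
```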
diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c
index 3b333e3b..645fa994 100644
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c
@@ -252,7 +252,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
}
path->l[0].b = (void *) ck;
- path->l[0].lock_seq = ck->c.lock.state.seq;
+ path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
ret = bch2_btree_node_lock_write(trans, path, &ck->c);
@@ -283,9 +283,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path,
return NULL;
init:
INIT_LIST_HEAD(&ck->list);
- bch2_btree_lock_init(&ck->c);
- if (pcpu_readers)
- six_lock_pcpu_alloc(&ck->c.lock);
+ bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
ck->c.cached = true;
BUG_ON(!six_trylock_intent(&ck->c.lock));
@@ -341,9 +339,6 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
}
mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
- } else {
- if (path->btree_id == BTREE_ID_subvolumes)
- six_lock_pcpu_alloc(&ck->c.lock);
}
ck->c.level = 0;
@@ -512,7 +507,7 @@ retry:
mark_btree_node_locked(trans, path, 0, lock_want);
}
- path->l[0].lock_seq = ck->c.lock.state.seq;
+ path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
path->l[0].b = (void *) ck;
fill:
path->uptodate = BTREE_ITER_UPTODATE;
@@ -594,7 +589,7 @@ retry:
mark_btree_node_locked(trans, path, 0, lock_want);
}
- path->l[0].lock_seq = ck->c.lock.state.seq;
+ path->l[0].lock_seq = six_lock_seq(&ck->c.lock);
path->l[0].b = (void *) ck;
fill:
if (!ck->valid)
@@ -872,7 +867,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
break;
list_del(&ck->list);
- six_lock_pcpu_free(&ck->c.lock);
+ six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
atomic_long_dec(&bc->nr_freed);
scanned++;
@@ -888,7 +883,7 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
break;
list_del(&ck->list);
- six_lock_pcpu_free(&ck->c.lock);
+ six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
atomic_long_dec(&bc->nr_freed);
scanned++;
@@ -1013,7 +1008,7 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
list_del(&ck->list);
kfree(ck->k);
- six_lock_pcpu_free(&ck->c.lock);
+ six_lock_exit(&ck->c.lock);
kmem_cache_free(bch2_key_cache, ck);
}
diff --git a/libbcachefs/btree_locking.c b/libbcachefs/btree_locking.c
index b9998665..70639a15 100644
--- a/libbcachefs/btree_locking.c
+++ b/libbcachefs/btree_locking.c
@@ -6,9 +6,10 @@
static struct lock_class_key bch2_btree_node_lock_key;
-void bch2_btree_lock_init(struct btree_bkey_cached_common *b)
+void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
+ enum six_lock_init_flags flags)
{
- __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key);
+ __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
}
#ifdef CONFIG_LOCKDEP
@@ -20,16 +21,6 @@ void bch2_assert_btree_nodes_not_locked(void)
/* Btree node locking: */
-static inline void six_lock_readers_add(struct six_lock *lock, int nr)
-{
- if (lock->readers)
- this_cpu_add(*lock->readers, nr);
- else if (nr > 0)
- atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
- else
- atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
-}
-
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
struct btree_path *skip,
struct btree_bkey_cached_common *b,
diff --git a/libbcachefs/btree_locking.h b/libbcachefs/btree_locking.h
index 327780ce..b341cc89 100644
--- a/libbcachefs/btree_locking.h
+++ b/libbcachefs/btree_locking.h
@@ -14,7 +14,7 @@
#include "btree_iter.h"
-void bch2_btree_lock_init(struct btree_bkey_cached_common *);
+void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void);
@@ -176,13 +176,13 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat
struct btree_path *linked;
EBUG_ON(path->l[b->c.level].b != b);
- EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
+ EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
trans_for_each_path_with_node(trans, b, linked)
- linked->l[b->c.level].lock_seq += 2;
+ linked->l[b->c.level].lock_seq++;
six_unlock_write(&b->c.lock);
}
@@ -206,8 +206,8 @@ static inline int __btree_node_lock_nopath(struct btree_trans *trans,
trans->lock_must_abort = false;
trans->locking = b;
- ret = six_lock_type_ip_waiter(&b->lock, type, &trans->locking_wait,
- bch2_six_check_for_deadlock, trans, ip);
+ ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
+ bch2_six_check_for_deadlock, trans, ip);
WRITE_ONCE(trans->locking, NULL);
WRITE_ONCE(trans->locking_wait.start_time, 0);
return ret;
@@ -284,7 +284,7 @@ static inline int __btree_node_lock_write(struct btree_trans *trans,
bool lock_may_not_fail)
{
EBUG_ON(&path->l[b->level].b->c != b);
- EBUG_ON(path->l[b->level].lock_seq != b->lock.state.seq);
+ EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
EBUG_ON(!btree_node_intent_locked(path, b->level));
/*
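
Per-cpu reader state likewise moves out of a separate alloc/free pair and into the lock's init/exit lifecycle. A caller-side summary of the migration, condensed from the hunks above (comment-only, bodies elided):

```c
/*
 * old:
 *	bch2_btree_lock_init(&b->c);
 *	if (pcpu_readers)
 *		six_lock_pcpu_alloc(&b->c.lock);
 *	...
 *	six_lock_pcpu_free(&b->c.lock);
 *
 * new:
 *	bch2_btree_lock_init(&b->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
 *	...
 *	six_lock_exit(&b->c.lock);
 */
```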
diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c
index 6ba0954e..1319337c 100644
--- a/libbcachefs/btree_update_interior.c
+++ b/libbcachefs/btree_update_interior.c
@@ -688,7 +688,7 @@ err:
bch2_trans_unlock(&trans);
btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_intent);
mark_btree_node_locked(&trans, path, b->c.level, SIX_LOCK_intent);
- path->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
path->l[b->c.level].b = b;
bch2_btree_node_lock_write_nofail(&trans, path, &b->c);
diff --git a/libbcachefs/buckets.c b/libbcachefs/buckets.c
index bce42eef..bd144182 100644
--- a/libbcachefs/buckets.c
+++ b/libbcachefs/buckets.c
@@ -137,17 +137,17 @@ u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
struct bch_fs_usage_online *ret;
- unsigned seq, i, v, u64s = fs_usage_u64s(c) + 1;
+ unsigned nr_replicas = READ_ONCE(c->replicas.nr);
+ unsigned seq, i;
retry:
- ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
+ ret = kmalloc(__fs_usage_online_u64s(nr_replicas) * sizeof(u64), GFP_NOFS);
if (unlikely(!ret))
return NULL;
percpu_down_read(&c->mark_lock);
- v = fs_usage_u64s(c) + 1;
- if (unlikely(u64s != v)) {
- u64s = v;
+ if (nr_replicas != c->replicas.nr) {
+ nr_replicas = c->replicas.nr;
percpu_up_read(&c->mark_lock);
kfree(ret);
goto retry;
@@ -157,10 +157,12 @@ retry:
do {
seq = read_seqcount_begin(&c->usage_lock);
- unsafe_memcpy(&ret->u, c->usage_base, u64s * sizeof(u64),
+ unsafe_memcpy(&ret->u, c->usage_base,
+ __fs_usage_u64s(nr_replicas) * sizeof(u64),
"embedded variable length struct");
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
- acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
+ acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i],
+ __fs_usage_u64s(nr_replicas));
} while (read_seqcount_retry(&c->usage_lock, seq));
return ret;
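
bch2_fs_usage_read() now snapshots c->replicas.nr before allocating, sizes the buffer with the new __fs_usage_online_u64s() helper, and rechecks the snapshot under mark_lock, retrying if a replicas-table resize raced with the allocation. A self-contained userspace sketch of that allocate-then-recheck pattern; the lock type, field names, and the fixed header size are stand-ins:

```c
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct fs {
	pthread_rwlock_t mark_lock;
	unsigned nr_replicas;	/* grows when the replicas table grows */
};

static uint64_t *usage_read(struct fs *c)
{
	/* unlocked snapshot (the kernel uses READ_ONCE() here) */
	unsigned nr = c->nr_replicas;
	uint64_t *ret;
retry:
	/* 16 stands in for the fixed part of the usage struct */
	ret = calloc(16 + nr, sizeof(*ret));
	if (!ret)
		return NULL;

	pthread_rwlock_rdlock(&c->mark_lock);
	if (nr != c->nr_replicas) {
		/* raced with a resize: resize the buffer and try again */
		nr = c->nr_replicas;
		pthread_rwlock_unlock(&c->mark_lock);
		free(ret);
		goto retry;
	}

	/* ... copy the counters while holding the lock ... */

	pthread_rwlock_unlock(&c->mark_lock);
	return ret;
}
```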
diff --git a/libbcachefs/buckets.h b/libbcachefs/buckets.h
index d677b022..bdf4fff9 100644
--- a/libbcachefs/buckets.h
+++ b/libbcachefs/buckets.h
@@ -207,10 +207,24 @@ static inline u64 dev_buckets_available(struct bch_dev *ca,
/* Filesystem usage: */
+static inline unsigned __fs_usage_u64s(unsigned nr_replicas)
+{
+ return sizeof(struct bch_fs_usage) / sizeof(u64) + nr_replicas;
+}
+
static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
- return sizeof(struct bch_fs_usage) / sizeof(u64) +
- READ_ONCE(c->replicas.nr);
+ return __fs_usage_u64s(READ_ONCE(c->replicas.nr));
+}
+
+static inline unsigned __fs_usage_online_u64s(unsigned nr_replicas)
+{
+ return sizeof(struct bch_fs_usage_online) / sizeof(u64) + nr_replicas;
+}
+
+static inline unsigned fs_usage_online_u64s(struct bch_fs *c)
+{
+ return __fs_usage_online_u64s(READ_ONCE(c->replicas.nr));
}
static inline unsigned dev_usage_u64s(void)
diff --git a/libbcachefs/trace.h b/libbcachefs/trace.h
index 8027c2a1..cfb1779d 100644
--- a/libbcachefs/trace.h
+++ b/libbcachefs/trace.h
@@ -420,7 +420,9 @@ TRACE_EVENT(btree_path_relock_fail,
else
scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
__entry->iter_lock_seq = path->l[level].lock_seq;
- __entry->node_lock_seq = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
+ __entry->node_lock_seq = is_btree_node(path, level)
+ ? six_lock_seq(&path->l[level].b->c.lock)
+ : 0;
),
TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s iter seq %u lock seq %u",
@@ -475,7 +477,9 @@ TRACE_EVENT(btree_path_upgrade_fail,
__entry->read_count = c.n[SIX_LOCK_read];
__entry->intent_count = c.n[SIX_LOCK_read];
__entry->iter_lock_seq = path->l[level].lock_seq;
- __entry->node_lock_seq = is_btree_node(path, level) ? path->l[level].b->c.lock.state.seq : 0;
+ __entry->node_lock_seq = is_btree_node(path, level)
+ ? six_lock_seq(&path->l[level].b->c.lock)
+ : 0;
),
TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
diff --git a/libbcachefs/util.c b/libbcachefs/util.c
index dfc55fe4..90796863 100644
--- a/libbcachefs/util.c
+++ b/libbcachefs/util.c
@@ -350,11 +350,8 @@ static inline void bch2_time_stats_update_one(struct bch2_time_stats *stats,
if (time_after64(end, start)) {
duration = end - start;
- stats->duration_stats = mean_and_variance_update_inlined(stats->duration_stats,
- duration);
- stats->duration_stats_weighted = mean_and_variance_weighted_update(
- stats->duration_stats_weighted,
- duration);
+ stats->duration_stats = mean_and_variance_update(stats->duration_stats, duration);
+ mean_and_variance_weighted_update(&stats->duration_stats_weighted, duration);
stats->max_duration = max(stats->max_duration, duration);
stats->min_duration = min(stats->min_duration, duration);
bch2_quantiles_update(&stats->quantiles, duration);
@@ -362,10 +359,8 @@ static inline void bch2_time_stats_update_one(struct bch2_time_stats *stats,
if (time_after64(end, stats->last_event)) {
freq = end - stats->last_event;
- stats->freq_stats = mean_and_variance_update_inlined(stats->freq_stats, freq);
- stats->freq_stats_weighted = mean_and_variance_weighted_update(
- stats->freq_stats_weighted,
- freq);
+ stats->freq_stats = mean_and_variance_update(stats->freq_stats, freq);
+ mean_and_variance_weighted_update(&stats->freq_stats_weighted, freq);
stats->max_freq = max(stats->max_freq, freq);
stats->min_freq = min(stats->min_freq, freq);
stats->last_event = end;
@@ -594,8 +589,8 @@ void bch2_time_stats_exit(struct bch2_time_stats *stats)
void bch2_time_stats_init(struct bch2_time_stats *stats)
{
memset(stats, 0, sizeof(*stats));
- stats->duration_stats_weighted.w = 8;
- stats->freq_stats_weighted.w = 8;
+ stats->duration_stats_weighted.weight = 8;
+ stats->freq_stats_weighted.weight = 8;
stats->min_duration = U64_MAX;
stats->min_freq = U64_MAX;
spin_lock_init(&stats->lock);
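
The mean-and-variance helpers now update the weighted stats in place through a pointer instead of returning a new struct by value, and the weight field is spelled out as .weight. A sketch of what an in-place exponentially weighted update of this shape can look like; the arithmetic is a generic fixed-point EWMA, not necessarily the kernel's exact implementation:

```c
#include <stdint.h>

struct mean_and_variance_weighted {
	int64_t mean;
	uint8_t weight;		/* renamed from .w in this update */
};

/* new-style signature: update in place via pointer, no return value */
static void ewma_update(struct mean_and_variance_weighted *s, int64_t v)
{
	/* mean += (v - mean) / 2^weight; arithmetic right shift on the
	 * signed delta, as is conventional for fixed-point EWMAs */
	s->mean += (v - s->mean) >> s->weight;
}
```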