author	Ahmed S. Darwish <a.darwish@linutronix.de>	2021-10-16 10:49:08 +0200
committer	David S. Miller <davem@davemloft.net>	2021-10-18 12:54:41 +0100
commit	f56940daa5a74fb20b5f5487535549949f2d8d0c (patch)
tree	f50ec19fb2903f670b8f6ed33c89be3bb8694503 /net/sched
parent	67c9e6270f3013e4d86ec57c4e7f27459f2a0652 (diff)
net: sched: Use _bstats_update/set() instead of raw writes
The Qdisc::running sequence counter, used to protect Qdisc::bstats reads
from parallel writes, is in the process of being removed. Qdisc::bstats
read/writes will synchronize using an internal u64_stats sync point
instead.

Modify all bstats writes to use _bstats_update(). This ensures that the
internal u64_stats sync point is always acquired and released as
appropriate.

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
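For reference, a minimal sketch of the helper this patch standardizes on,
assuming the u64_stats-based definition added by the parent commit
(67c9e6270f30); the field and helper names below reflect that series as
inferred from this diff, not a verbatim quote of the tree:

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	/* Open the write-side u64_stats sync point, bump both counters,
	 * then publish the update so readers can detect a torn read.
	 */
	u64_stats_update_begin(&bstats->syncp);
	bstats->bytes += bytes;
	bstats->packets += packets;
	u64_stats_update_end(&bstats->syncp);
}

Readers would pair this with the usual u64_stats fetch/retry loop (again
a sketch under the same assumptions):

	unsigned int start;
	u64 bytes, packets;

	do {
		start = u64_stats_fetch_begin(&bstats->syncp);
		bytes = bstats->bytes;
		packets = bstats->packets;
	} while (u64_stats_fetch_retry(&bstats->syncp, start));

On 64-bit kernels the begin/end pairs are effectively no-ops, since
64-bit loads and stores are already atomic there; the seqcount-style
retry only materializes on 32-bit. This is why routing all writes
through _bstats_update(), rather than raw "bstats.bytes += len" writes,
costs nothing where it isn't needed while keeping 32-bit readers safe.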
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_cbq.c	3
-rw-r--r--	net/sched/sch_gred.c	7
-rw-r--r--	net/sched/sch_htb.c	25
-rw-r--r--	net/sched/sch_qfq.c	3
4 files changed, 21 insertions, 17 deletions
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d01f6ec315f8..ef9e87175d35 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -565,8 +565,7 @@ cbq_update(struct cbq_sched_data *q)
long avgidle = cl->avgidle;
long idle;
- cl->bstats.packets++;
- cl->bstats.bytes += len;
+ _bstats_update(&cl->bstats, len, 1);
/*
* (now - last) is total time between packet right edges.
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 2ddcbb2efdbb..02b03d6d24ea 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -353,6 +353,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
{
struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_qopt_offload *hw_stats;
+ u64 bytes = 0, packets = 0;
unsigned int i;
int ret;
@@ -381,15 +382,15 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
- _bstats_update(&sch->bstats,
- hw_stats->stats.bstats[i].bytes,
- hw_stats->stats.bstats[i].packets);
+ bytes += hw_stats->stats.bstats[i].bytes;
+ packets += hw_stats->stats.bstats[i].packets;
sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
sch->qstats.drops += hw_stats->stats.qstats[i].drops;
sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
}
+ _bstats_update(&sch->bstats, bytes, packets);
kfree(hw_stats);
return ret;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 2e805b17efcf..324ecfdf842a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1308,6 +1308,7 @@ nla_put_failure:
static void htb_offload_aggregate_stats(struct htb_sched *q,
struct htb_class *cl)
{
+ u64 bytes = 0, packets = 0;
struct htb_class *c;
unsigned int i;
@@ -1323,14 +1324,15 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
if (p != cl)
continue;
- cl->bstats.bytes += c->bstats_bias.bytes;
- cl->bstats.packets += c->bstats_bias.packets;
+ bytes += c->bstats_bias.bytes;
+ packets += c->bstats_bias.packets;
if (c->level == 0) {
- cl->bstats.bytes += c->leaf.q->bstats.bytes;
- cl->bstats.packets += c->leaf.q->bstats.packets;
+ bytes += c->leaf.q->bstats.bytes;
+ packets += c->leaf.q->bstats.packets;
}
}
}
+ _bstats_update(&cl->bstats, bytes, packets);
}
static int
@@ -1358,8 +1360,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
cl->bstats = cl->leaf.q->bstats;
else
gnet_stats_basic_packed_init(&cl->bstats);
- cl->bstats.bytes += cl->bstats_bias.bytes;
- cl->bstats.packets += cl->bstats_bias.packets;
+ _bstats_update(&cl->bstats,
+ cl->bstats_bias.bytes,
+ cl->bstats_bias.packets);
} else {
htb_offload_aggregate_stats(q, cl);
}
@@ -1578,8 +1581,9 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
WARN_ON(old != q);
if (cl->parent) {
- cl->parent->bstats_bias.bytes += q->bstats.bytes;
- cl->parent->bstats_bias.packets += q->bstats.packets;
+ _bstats_update(&cl->parent->bstats_bias,
+ q->bstats.bytes,
+ q->bstats.packets);
}
offload_opt = (struct tc_htb_qopt_offload) {
@@ -1925,8 +1929,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
htb_graft_helper(dev_queue, old_q);
goto err_kill_estimator;
}
- parent->bstats_bias.bytes += old_q->bstats.bytes;
- parent->bstats_bias.packets += old_q->bstats.packets;
+ _bstats_update(&parent->bstats_bias,
+ old_q->bstats.bytes,
+ old_q->bstats.packets);
qdisc_put(old_q);
}
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index b6d989b69324..bea68c91027a 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1235,8 +1235,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- cl->bstats.bytes += len;
- cl->bstats.packets += gso_segs;
+ _bstats_update(&cl->bstats, len, gso_segs);
sch->qstats.backlog += len;
++sch->q.qlen;