Skip to content

Commit a6edf2c

Browse files
edumazet authored and kuba-moo committed
net_sched: sch_hhf: annotate data-races in hhf_dump_stats()
hhf_dump_stats() only runs with RTNL held, reading fields that can be changed in the qdisc fast path. Add READ_ONCE()/WRITE_ONCE() annotations.

Fixes: edb09eb ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Link: https://patch.msgid.link/20260421143349.4052215-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
1 parent 9d146a5 commit a6edf2c

1 file changed

Lines changed: 10 additions & 9 deletions

File tree

net/sched/sch_hhf.c

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -198,7 +198,8 @@ static struct hh_flow_state *seek_list(const u32 hash,
198198
return NULL;
199199
list_del(&flow->flowchain);
200200
kfree(flow);
201-
q->hh_flows_current_cnt--;
201+
WRITE_ONCE(q->hh_flows_current_cnt,
202+
q->hh_flows_current_cnt - 1);
202203
} else if (flow->hash_id == hash) {
203204
return flow;
204205
}
@@ -226,15 +227,15 @@ static struct hh_flow_state *alloc_new_hh(struct list_head *head,
226227
}
227228

228229
if (q->hh_flows_current_cnt >= q->hh_flows_limit) {
229-
q->hh_flows_overlimit++;
230+
WRITE_ONCE(q->hh_flows_overlimit, q->hh_flows_overlimit + 1);
230231
return NULL;
231232
}
232233
/* Create new entry. */
233234
flow = kzalloc_obj(struct hh_flow_state, GFP_ATOMIC);
234235
if (!flow)
235236
return NULL;
236237

237-
q->hh_flows_current_cnt++;
238+
WRITE_ONCE(q->hh_flows_current_cnt, q->hh_flows_current_cnt + 1);
238239
INIT_LIST_HEAD(&flow->flowchain);
239240
list_add_tail(&flow->flowchain, head);
240241

@@ -309,7 +310,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
309310
return WDRR_BUCKET_FOR_NON_HH;
310311
flow->hash_id = hash;
311312
flow->hit_timestamp = now;
312-
q->hh_flows_total_cnt++;
313+
WRITE_ONCE(q->hh_flows_total_cnt, q->hh_flows_total_cnt + 1);
313314

314315
/* By returning without updating counters in q->hhf_arrays,
315316
* we implicitly implement "shielding" (see Optimization O1).
@@ -403,7 +404,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
403404
return NET_XMIT_SUCCESS;
404405

405406
prev_backlog = sch->qstats.backlog;
406-
q->drop_overlimit++;
407+
WRITE_ONCE(q->drop_overlimit, q->drop_overlimit + 1);
407408
/* Return Congestion Notification only if we dropped a packet from this
408409
* bucket.
409410
*/
@@ -686,10 +687,10 @@ static int hhf_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
686687
{
687688
struct hhf_sched_data *q = qdisc_priv(sch);
688689
struct tc_hhf_xstats st = {
689-
.drop_overlimit = q->drop_overlimit,
690-
.hh_overlimit = q->hh_flows_overlimit,
691-
.hh_tot_count = q->hh_flows_total_cnt,
692-
.hh_cur_count = q->hh_flows_current_cnt,
690+
.drop_overlimit = READ_ONCE(q->drop_overlimit),
691+
.hh_overlimit = READ_ONCE(q->hh_flows_overlimit),
692+
.hh_tot_count = READ_ONCE(q->hh_flows_total_cnt),
693+
.hh_cur_count = READ_ONCE(q->hh_flows_current_cnt),
693694
};
694695

695696
return gnet_stats_copy_app(d, &st, sizeof(st));

0 commit comments

Comments (0)