
Commit a8f5192

edumazet authored and kuba-moo committed
net/sched: sch_red: annotate data-races in red_dump_stats()
red_dump_stats() only runs with RTNL held, reading fields that can be changed in the qdisc fast path. Add READ_ONCE()/WRITE_ONCE() annotations.

An alternative would be to acquire the qdisc spinlock, but our long-term goal is to make qdisc dump operations as lockless as we can.

tc_red_xstats fields do not need to be latched atomically, otherwise this bug would have been caught earlier.

Fixes: edb09eb ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Link: https://patch.msgid.link/20260421142309.3964322-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
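For readers less familiar with the pattern, below is a minimal, self-contained user-space sketch of the idea the patch applies; it is illustrative only, not the kernel code. The simplified WRITE_ONCE()/READ_ONCE() macros and the red_stats_example structure are hypothetical stand-ins for the kernel's definitions in <linux/compiler.h> and for struct red_stats: the fast path updates a counter through WRITE_ONCE() while the lockless dump side reads it through READ_ONCE(), so each access is a single, tear-free load or store.

/*
 * Illustrative user-space sketch only -- not the kernel implementation.
 * The macros below are simplified stand-ins for the kernel's
 * READ_ONCE()/WRITE_ONCE() from <linux/compiler.h>.
 */
#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

/* Hypothetical stand-in for the relevant fields of struct red_stats. */
struct red_stats_example {
        unsigned int prob_drop;
        unsigned int forced_drop;
};

static struct red_stats_example stats;

/* Fast path: the counter update goes through a single, non-torn store. */
static void fast_path_drop(void)
{
        WRITE_ONCE(stats.prob_drop, stats.prob_drop + 1);
}

/* Dump path: the lockless reader takes tear-free snapshots of each field. */
static unsigned int dump_early_drops(void)
{
        return READ_ONCE(stats.prob_drop) + READ_ONCE(stats.forced_drop);
}

int main(void)
{
        fast_path_drop();
        printf("early drops: %u\n", dump_early_drops());
        return 0;
}

Note that the read-modify-write is still not atomic, so concurrent increments could in principle be lost; as the commit message notes, that is acceptable here because the tc_red_xstats fields do not need to be latched atomically. The annotations document the intentional race and keep the compiler from splitting or duplicating the accesses.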
1 parent bbfaa73 commit a8f5192

1 file changed

net/sched/sch_red.c: 21 additions & 10 deletions
@@ -90,17 +90,20 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	case RED_PROB_MARK:
 		qdisc_qstats_overlimit(sch);
 		if (!red_use_ecn(q)) {
-			q->stats.prob_drop++;
+			WRITE_ONCE(q->stats.prob_drop,
+				   q->stats.prob_drop + 1);
 			goto congestion_drop;
 		}
 
 		if (INET_ECN_set_ce(skb)) {
-			q->stats.prob_mark++;
+			WRITE_ONCE(q->stats.prob_mark,
+				   q->stats.prob_mark + 1);
 			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
 			if (!skb)
 				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
-			q->stats.prob_drop++;
+			WRITE_ONCE(q->stats.prob_drop,
+				   q->stats.prob_drop + 1);
 			goto congestion_drop;
 		}
 
@@ -111,17 +114,20 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		reason = QDISC_DROP_OVERLIMIT;
 		qdisc_qstats_overlimit(sch);
 		if (red_use_harddrop(q) || !red_use_ecn(q)) {
-			q->stats.forced_drop++;
+			WRITE_ONCE(q->stats.forced_drop,
+				   q->stats.forced_drop + 1);
 			goto congestion_drop;
 		}
 
 		if (INET_ECN_set_ce(skb)) {
-			q->stats.forced_mark++;
+			WRITE_ONCE(q->stats.forced_mark,
+				   q->stats.forced_mark + 1);
 			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
 			if (!skb)
 				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
-			q->stats.forced_drop++;
+			WRITE_ONCE(q->stats.forced_drop,
+				   q->stats.forced_drop + 1);
 			goto congestion_drop;
 		}
 
@@ -135,7 +141,8 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		sch->qstats.backlog += len;
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
-		q->stats.pdrop++;
+		WRITE_ONCE(q->stats.pdrop,
+			   q->stats.pdrop + 1);
 		qdisc_qstats_drop(sch);
 	}
 	return ret;
@@ -463,9 +470,13 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
 					      &hw_stats_request);
 	}
-	st.early = q->stats.prob_drop + q->stats.forced_drop;
-	st.pdrop = q->stats.pdrop;
-	st.marked = q->stats.prob_mark + q->stats.forced_mark;
+	st.early = READ_ONCE(q->stats.prob_drop) +
+		   READ_ONCE(q->stats.forced_drop);
+
+	st.pdrop = READ_ONCE(q->stats.pdrop);
+
+	st.marked = READ_ONCE(q->stats.prob_mark) +
+		    READ_ONCE(q->stats.forced_mark);
 
 	return gnet_stats_copy_app(d, &st, sizeof(st));
 }
