Skip to content

Commit 37c7cc8

Browse files
edumazet authored and gregkh committed
net: add rb_to_skb() and other rb tree helpers
Generalize the private netem_rb_to_skb(). The TCP rtx queue will soon be converted to an rb-tree, so we will need skb_rbtree_walk() helpers. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net> (cherry picked from commit 18a4c0e) Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 6bf32cd commit 37c7cc8

4 files changed

Lines changed: 37 additions & 36 deletions

File tree

include/linux/skbuff.h

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3169,6 +3169,12 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
31693169

31703170
#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
31713171

3172+
#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3173+
#define skb_rb_first(root) rb_to_skb(rb_first(root))
3174+
#define skb_rb_last(root) rb_to_skb(rb_last(root))
3175+
#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3176+
#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3177+
31723178
#define skb_queue_walk(queue, skb) \
31733179
for (skb = (queue)->next; \
31743180
skb != (struct sk_buff *)(queue); \
@@ -3183,6 +3189,18 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
31833189
for (; skb != (struct sk_buff *)(queue); \
31843190
skb = skb->next)
31853191

3192+
#define skb_rbtree_walk(skb, root) \
3193+
for (skb = skb_rb_first(root); skb != NULL; \
3194+
skb = skb_rb_next(skb))
3195+
3196+
#define skb_rbtree_walk_from(skb) \
3197+
for (; skb != NULL; \
3198+
skb = skb_rb_next(skb))
3199+
3200+
#define skb_rbtree_walk_from_safe(skb, tmp) \
3201+
for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3202+
skb = tmp)
3203+
31863204
#define skb_queue_walk_from_safe(queue, skb, tmp) \
31873205
for (tmp = skb->next; \
31883206
skb != (struct sk_buff *)(queue); \

net/ipv4/tcp_fastopen.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -458,17 +458,15 @@ bool tcp_fastopen_active_should_disable(struct sock *sk)
458458
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
459459
{
460460
struct tcp_sock *tp = tcp_sk(sk);
461-
struct rb_node *p;
462-
struct sk_buff *skb;
463461
struct dst_entry *dst;
462+
struct sk_buff *skb;
464463

465464
if (!tp->syn_fastopen)
466465
return;
467466

468467
if (!tp->data_segs_in) {
469-
p = rb_first(&tp->out_of_order_queue);
470-
if (p && !rb_next(p)) {
471-
skb = rb_entry(p, struct sk_buff, rbnode);
468+
skb = skb_rb_first(&tp->out_of_order_queue);
469+
if (skb && !skb_rb_next(skb)) {
472470
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
473471
tcp_fastopen_active_disable(sk);
474472
return;

net/ipv4/tcp_input.c

Lines changed: 12 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -4372,7 +4372,7 @@ static void tcp_ofo_queue(struct sock *sk)
43724372

43734373
p = rb_first(&tp->out_of_order_queue);
43744374
while (p) {
4375-
skb = rb_entry(p, struct sk_buff, rbnode);
4375+
skb = rb_to_skb(p);
43764376
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
43774377
break;
43784378

@@ -4440,7 +4440,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
44404440
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
44414441
{
44424442
struct tcp_sock *tp = tcp_sk(sk);
4443-
struct rb_node **p, *q, *parent;
4443+
struct rb_node **p, *parent;
44444444
struct sk_buff *skb1;
44454445
u32 seq, end_seq;
44464446
bool fragstolen;
@@ -4503,7 +4503,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
45034503
parent = NULL;
45044504
while (*p) {
45054505
parent = *p;
4506-
skb1 = rb_entry(parent, struct sk_buff, rbnode);
4506+
skb1 = rb_to_skb(parent);
45074507
if (before(seq, TCP_SKB_CB(skb1)->seq)) {
45084508
p = &parent->rb_left;
45094509
continue;
@@ -4548,9 +4548,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
45484548

45494549
merge_right:
45504550
/* Remove other segments covered by skb. */
4551-
while ((q = rb_next(&skb->rbnode)) != NULL) {
4552-
skb1 = rb_entry(q, struct sk_buff, rbnode);
4553-
4551+
while ((skb1 = skb_rb_next(skb)) != NULL) {
45544552
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
45554553
break;
45564554
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4565,7 +4563,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
45654563
tcp_drop(sk, skb1);
45664564
}
45674565
/* If there is no skb after us, we are the last_skb ! */
4568-
if (!q)
4566+
if (!skb1)
45694567
tp->ooo_last_skb = skb;
45704568

45714569
add_sack:
@@ -4749,7 +4747,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
47494747
if (list)
47504748
return !skb_queue_is_last(list, skb) ? skb->next : NULL;
47514749

4752-
return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
4750+
return skb_rb_next(skb);
47534751
}
47544752

47554753
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4778,7 +4776,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
47784776

47794777
while (*p) {
47804778
parent = *p;
4781-
skb1 = rb_entry(parent, struct sk_buff, rbnode);
4779+
skb1 = rb_to_skb(parent);
47824780
if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
47834781
p = &parent->rb_left;
47844782
else
@@ -4898,27 +4896,20 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
48984896
struct tcp_sock *tp = tcp_sk(sk);
48994897
u32 range_truesize, sum_tiny = 0;
49004898
struct sk_buff *skb, *head;
4901-
struct rb_node *p;
49024899
u32 start, end;
49034900

4904-
p = rb_first(&tp->out_of_order_queue);
4905-
skb = rb_entry_safe(p, struct sk_buff, rbnode);
4901+
skb = skb_rb_first(&tp->out_of_order_queue);
49064902
new_range:
49074903
if (!skb) {
4908-
p = rb_last(&tp->out_of_order_queue);
4909-
/* Note: This is possible p is NULL here. We do not
4910-
* use rb_entry_safe(), as ooo_last_skb is valid only
4911-
* if rbtree is not empty.
4912-
*/
4913-
tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
4904+
tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
49144905
return;
49154906
}
49164907
start = TCP_SKB_CB(skb)->seq;
49174908
end = TCP_SKB_CB(skb)->end_seq;
49184909
range_truesize = skb->truesize;
49194910

49204911
for (head = skb;;) {
4921-
skb = tcp_skb_next(skb, NULL);
4912+
skb = skb_rb_next(skb);
49224913

49234914
/* Range is terminated when we see a gap or when
49244915
* we are at the queue end.
@@ -4974,7 +4965,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
49744965
prev = rb_prev(node);
49754966
rb_erase(node, &tp->out_of_order_queue);
49764967
goal -= rb_to_skb(node)->truesize;
4977-
tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
4968+
tcp_drop(sk, rb_to_skb(node));
49784969
if (!prev || goal <= 0) {
49794970
sk_mem_reclaim(sk);
49804971
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
@@ -4984,7 +4975,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
49844975
}
49854976
node = prev;
49864977
} while (node);
4987-
tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
4978+
tp->ooo_last_skb = rb_to_skb(prev);
49884979

49894980
/* Reset SACK state. A conforming SACK implementation will
49904981
* do the same at a timeout based retransmit. When a connection

net/sched/sch_netem.c

Lines changed: 4 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -149,12 +149,6 @@ struct netem_skb_cb {
149149
ktime_t tstamp_save;
150150
};
151151

152-
153-
static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
154-
{
155-
return rb_entry(rb, struct sk_buff, rbnode);
156-
}
157-
158152
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
159153
{
160154
/* we assume we can use skb next/prev/tstamp as storage for rb_node */
@@ -365,7 +359,7 @@ static void tfifo_reset(struct Qdisc *sch)
365359
struct rb_node *p;
366360

367361
while ((p = rb_first(&q->t_root))) {
368-
struct sk_buff *skb = netem_rb_to_skb(p);
362+
struct sk_buff *skb = rb_to_skb(p);
369363

370364
rb_erase(p, &q->t_root);
371365
rtnl_kfree_skbs(skb, skb);
@@ -382,7 +376,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
382376
struct sk_buff *skb;
383377

384378
parent = *p;
385-
skb = netem_rb_to_skb(parent);
379+
skb = rb_to_skb(parent);
386380
if (tnext >= netem_skb_cb(skb)->time_to_send)
387381
p = &parent->rb_right;
388382
else
@@ -538,7 +532,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
538532
struct sk_buff *t_skb;
539533
struct netem_skb_cb *t_last;
540534

541-
t_skb = netem_rb_to_skb(rb_last(&q->t_root));
535+
t_skb = skb_rb_last(&q->t_root);
542536
t_last = netem_skb_cb(t_skb);
543537
if (!last ||
544538
t_last->time_to_send > last->time_to_send) {
@@ -618,7 +612,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
618612
if (p) {
619613
psched_time_t time_to_send;
620614

621-
skb = netem_rb_to_skb(p);
615+
skb = rb_to_skb(p);
622616

623617
/* if more time remaining? */
624618
time_to_send = netem_skb_cb(skb)->time_to_send;

0 commit comments

Comments
 (0)