@@ -4372,7 +4372,7 @@ static void tcp_ofo_queue(struct sock *sk)
43724372
43734373 p = rb_first (& tp -> out_of_order_queue );
43744374 while (p ) {
4375- skb = rb_entry ( p , struct sk_buff , rbnode );
4375+ skb = rb_to_skb ( p );
43764376 if (after (TCP_SKB_CB (skb )-> seq , tp -> rcv_nxt ))
43774377 break ;
43784378
@@ -4440,7 +4440,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
44404440static void tcp_data_queue_ofo (struct sock * sk , struct sk_buff * skb )
44414441{
44424442 struct tcp_sock * tp = tcp_sk (sk );
4443- struct rb_node * * p , * q , * parent ;
4443+ struct rb_node * * p , * parent ;
44444444 struct sk_buff * skb1 ;
44454445 u32 seq , end_seq ;
44464446 bool fragstolen ;
@@ -4503,7 +4503,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
45034503 parent = NULL ;
45044504 while (* p ) {
45054505 parent = * p ;
4506- skb1 = rb_entry (parent , struct sk_buff , rbnode );
4506+ skb1 = rb_to_skb (parent );
45074507 if (before (seq , TCP_SKB_CB (skb1 )-> seq )) {
45084508 p = & parent -> rb_left ;
45094509 continue ;
@@ -4548,9 +4548,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
45484548
45494549merge_right :
45504550 /* Remove other segments covered by skb. */
4551- while ((q = rb_next (& skb -> rbnode )) != NULL ) {
4552- skb1 = rb_entry (q , struct sk_buff , rbnode );
4553-
4551+ while ((skb1 = skb_rb_next (skb )) != NULL ) {
45544552 if (!after (end_seq , TCP_SKB_CB (skb1 )-> seq ))
45554553 break ;
45564554 if (before (end_seq , TCP_SKB_CB (skb1 )-> end_seq )) {
@@ -4565,7 +4563,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
45654563 tcp_drop (sk , skb1 );
45664564 }
45674565 /* If there is no skb after us, we are the last_skb ! */
4568- if (!q )
4566+ if (!skb1 )
45694567 tp -> ooo_last_skb = skb ;
45704568
45714569add_sack :
@@ -4749,7 +4747,7 @@ static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *li
47494747 if (list )
47504748 return !skb_queue_is_last (list , skb ) ? skb -> next : NULL ;
47514749
4752- return rb_entry_safe ( rb_next ( & skb -> rbnode ), struct sk_buff , rbnode );
4750+ return skb_rb_next ( skb );
47534751}
47544752
47554753static struct sk_buff * tcp_collapse_one (struct sock * sk , struct sk_buff * skb ,
@@ -4778,7 +4776,7 @@ static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
47784776
47794777 while (* p ) {
47804778 parent = * p ;
4781- skb1 = rb_entry (parent , struct sk_buff , rbnode );
4779+ skb1 = rb_to_skb (parent );
47824780 if (before (TCP_SKB_CB (skb )-> seq , TCP_SKB_CB (skb1 )-> seq ))
47834781 p = & parent -> rb_left ;
47844782 else
@@ -4898,27 +4896,20 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
48984896 struct tcp_sock * tp = tcp_sk (sk );
48994897 u32 range_truesize , sum_tiny = 0 ;
49004898 struct sk_buff * skb , * head ;
4901- struct rb_node * p ;
49024899 u32 start , end ;
49034900
4904- p = rb_first (& tp -> out_of_order_queue );
4905- skb = rb_entry_safe (p , struct sk_buff , rbnode );
4901+ skb = skb_rb_first (& tp -> out_of_order_queue );
49064902new_range :
49074903 if (!skb ) {
4908- p = rb_last (& tp -> out_of_order_queue );
4909- /* Note: This is possible p is NULL here. We do not
4910- * use rb_entry_safe(), as ooo_last_skb is valid only
4911- * if rbtree is not empty.
4912- */
4913- tp -> ooo_last_skb = rb_entry (p , struct sk_buff , rbnode );
4904+ tp -> ooo_last_skb = skb_rb_last (& tp -> out_of_order_queue );
49144905 return ;
49154906 }
49164907 start = TCP_SKB_CB (skb )-> seq ;
49174908 end = TCP_SKB_CB (skb )-> end_seq ;
49184909 range_truesize = skb -> truesize ;
49194910
49204911 for (head = skb ;;) {
4921- skb = tcp_skb_next (skb , NULL );
4912+ skb = skb_rb_next (skb );
49224913
49234914 /* Range is terminated when we see a gap or when
49244915 * we are at the queue end.
@@ -4974,7 +4965,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
49744965 prev = rb_prev (node );
49754966 rb_erase (node , & tp -> out_of_order_queue );
49764967 goal -= rb_to_skb (node )-> truesize ;
4977- tcp_drop (sk , rb_entry (node , struct sk_buff , rbnode ));
4968+ tcp_drop (sk , rb_to_skb (node ));
49784969 if (!prev || goal <= 0 ) {
49794970 sk_mem_reclaim (sk );
49804971 if (atomic_read (& sk -> sk_rmem_alloc ) <= sk -> sk_rcvbuf &&
@@ -4984,7 +4975,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
49844975 }
49854976 node = prev ;
49864977 } while (node );
4987- tp -> ooo_last_skb = rb_entry (prev , struct sk_buff , rbnode );
4978+ tp -> ooo_last_skb = rb_to_skb (prev );
49884979
49894980 /* Reset SACK state. A conforming SACK implementation will
49904981 * do the same at a timeout based retransmit. When a connection