 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/sched/mm.h>
-#include <linux/smpboot.h>
 #include <linux/mutex.h>
 #include <linux/rwsem.h>
 #include <linux/string.h>
@@ -218,31 +217,6 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
 }
 
-#ifndef CONFIG_PREEMPT_RT
-
-static DEFINE_STATIC_KEY_FALSE(use_backlog_threads_key);
-
-static int __init setup_backlog_napi_threads(char *arg)
-{
-	static_branch_enable(&use_backlog_threads_key);
-	return 0;
-}
-early_param("thread_backlog_napi", setup_backlog_napi_threads);
-
-static bool use_backlog_threads(void)
-{
-	return static_branch_unlikely(&use_backlog_threads_key);
-}
-
-#else
-
-static bool use_backlog_threads(void)
-{
-	return true;
-}
-
-#endif
-
 static inline void rps_lock_irqsave(struct softnet_data *sd,
 				    unsigned long *flags)
 {
@@ -4533,7 +4507,6 @@ EXPORT_SYMBOL(__dev_direct_xmit);
 /*************************************************************************
  *			Receiver routines
  *************************************************************************/
-static DEFINE_PER_CPU(struct task_struct *, backlog_napi);
 
 int netdev_max_backlog __read_mostly = 1000;
 EXPORT_SYMBOL(netdev_max_backlog);
@@ -4566,16 +4539,12 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 		 */
 		thread = READ_ONCE(napi->thread);
 		if (thread) {
-			if (use_backlog_threads() && thread == raw_cpu_read(backlog_napi))
-				goto use_local_napi;
-
 			set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
 			wake_up_process(thread);
 			return;
 		}
 	}
 
-use_local_napi:
 	list_add_tail(&napi->poll_list, &sd->poll_list);
 	WRITE_ONCE(napi->list_owner, smp_processor_id());
 	/* If not called from net_rx_action()
@@ -4821,11 +4790,6 @@ static void napi_schedule_rps(struct softnet_data *sd)
 
 #ifdef CONFIG_RPS
 	if (sd != mysd) {
-		if (use_backlog_threads()) {
-			__napi_schedule_irqoff(&sd->backlog);
-			return;
-		}
-
 		sd->rps_ipi_next = mysd->rps_ipi_list;
 		mysd->rps_ipi_list = sd;
 
@@ -6049,7 +6013,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 #ifdef CONFIG_RPS
 	struct softnet_data *remsd = sd->rps_ipi_list;
 
-	if (!use_backlog_threads() && remsd) {
+	if (remsd) {
 		sd->rps_ipi_list = NULL;
 
 		local_irq_enable();
@@ -6064,7 +6028,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	return !use_backlog_threads() && sd->rps_ipi_list;
+	return sd->rps_ipi_list != NULL;
 #else
 	return false;
 #endif
@@ -6108,7 +6072,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			 * We can use a plain write instead of clear_bit(),
 			 * and we dont need an smp_mb() memory barrier.
 			 */
-			napi->state &= NAPIF_STATE_THREADED;
+			napi->state = 0;
 			again = false;
 		} else {
 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
@@ -6774,48 +6738,43 @@ static void skb_defer_free_flush(struct softnet_data *sd)
 	}
 }
 
-static void napi_threaded_poll_loop(struct napi_struct *napi)
+static int napi_threaded_poll(void *data)
 {
+	struct napi_struct *napi = data;
 	struct softnet_data *sd;
-	unsigned long last_qs = jiffies;
+	void *have;
 
-	for (;;) {
-		bool repoll = false;
-		void *have;
+	while (!napi_thread_wait(napi)) {
+		unsigned long last_qs = jiffies;
 
-		local_bh_disable();
-		sd = this_cpu_ptr(&softnet_data);
-		sd->in_napi_threaded_poll = true;
+		for (;;) {
+			bool repoll = false;
 
-		have = netpoll_poll_lock(napi);
-		__napi_poll(napi, &repoll);
-		netpoll_poll_unlock(have);
+			local_bh_disable();
+			sd = this_cpu_ptr(&softnet_data);
+			sd->in_napi_threaded_poll = true;
 
-		sd->in_napi_threaded_poll = false;
-		barrier();
+			have = netpoll_poll_lock(napi);
+			__napi_poll(napi, &repoll);
+			netpoll_poll_unlock(have);
 
-		if (sd_has_rps_ipi_waiting(sd)) {
-			local_irq_disable();
-			net_rps_action_and_irq_enable(sd);
-		}
-		skb_defer_free_flush(sd);
-		local_bh_enable();
+			sd->in_napi_threaded_poll = false;
+			barrier();
 
-		if (!repoll)
-			break;
-
-		rcu_softirq_qs_periodic(last_qs);
-		cond_resched();
-	}
-}
-
-static int napi_threaded_poll(void *data)
-{
-	struct napi_struct *napi = data;
+			if (sd_has_rps_ipi_waiting(sd)) {
+				local_irq_disable();
+				net_rps_action_and_irq_enable(sd);
+			}
+			skb_defer_free_flush(sd);
+			local_bh_enable();
 
-	while (!napi_thread_wait(napi))
-		napi_threaded_poll_loop(napi);
+			if (!repoll)
+				break;
 
+			rcu_softirq_qs_periodic(last_qs);
+			cond_resched();
+		}
+	}
 	return 0;
 }
 
@@ -11400,22 +11359,20 @@ static int dev_cpu_dead(unsigned int oldcpu)
 
 		list_del_init(&napi->poll_list);
 		if (napi->poll == process_backlog)
-			napi->state &= NAPIF_STATE_THREADED;
+			napi->state = 0;
 		else
 			____napi_schedule(sd, napi);
 	}
 
 	raise_softirq_irqoff(NET_TX_SOFTIRQ);
 	local_irq_enable();
 
-	if (!use_backlog_threads()) {
 #ifdef CONFIG_RPS
-		remsd = oldsd->rps_ipi_list;
-		oldsd->rps_ipi_list = NULL;
+	remsd = oldsd->rps_ipi_list;
+	oldsd->rps_ipi_list = NULL;
 #endif
-		/* send out pending IPI's on offline CPU */
-		net_rps_send_ipi(remsd);
-	}
+	/* send out pending IPI's on offline CPU */
+	net_rps_send_ipi(remsd);
 
 	/* Process offline CPU's input_pkt_queue */
 	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
@@ -11678,38 +11635,6 @@ static struct pernet_operations __net_initdata default_device_ops = {
  *
  */
 
-static int backlog_napi_should_run(unsigned int cpu)
-{
-	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-	struct napi_struct *napi = &sd->backlog;
-
-	return test_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
-}
-
-static void run_backlog_napi(unsigned int cpu)
-{
-	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-
-	napi_threaded_poll_loop(&sd->backlog);
-}
-
-static void backlog_napi_setup(unsigned int cpu)
-{
-	struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu);
-	struct napi_struct *napi = &sd->backlog;
-
-	napi->thread = this_cpu_read(backlog_napi);
-	set_bit(NAPI_STATE_THREADED, &napi->state);
-}
-
-static struct smp_hotplug_thread backlog_threads = {
-	.store			= &backlog_napi,
-	.thread_should_run	= backlog_napi_should_run,
-	.thread_fn		= run_backlog_napi,
-	.thread_comm		= "backlog_napi/%u",
-	.setup			= backlog_napi_setup,
-};
-
 /*
  *	This is called single threaded during boot, so no need
  *	to take the rtnl semaphore.
@@ -11760,10 +11685,7 @@ static int __init net_dev_init(void)
 		init_gro_hash(&sd->backlog);
 		sd->backlog.poll = process_backlog;
 		sd->backlog.weight = weight_p;
-		INIT_LIST_HEAD(&sd->backlog.poll_list);
 	}
-	if (use_backlog_threads())
-		smpboot_register_percpu_thread(&backlog_threads);
 
 	dev_boot_phase = 0;
0 commit comments