@@ -986,7 +986,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
986986 }
987987#endif
988988
989- schedule_work (& musb -> irq_work );
989+ schedule_delayed_work (& musb -> irq_work , 0 );
990990
991991 return handled ;
992992}
@@ -1855,14 +1855,23 @@ static void musb_pm_runtime_check_session(struct musb *musb)
18551855 MUSB_DEVCTL_HR ;
18561856 switch (devctl & ~s ) {
18571857 case MUSB_QUIRK_B_INVALID_VBUS_91 :
1858- if (!musb -> session && !musb -> quirk_invalid_vbus ) {
1859- musb -> quirk_invalid_vbus = true;
1858+ if (musb -> quirk_retries -- ) {
18601859 musb_dbg (musb ,
1861- "First invalid vbus, assume no session" );
1860+ "Poll devctl on invalid vbus, assume no session" );
1861+ schedule_delayed_work (& musb -> irq_work ,
1862+ msecs_to_jiffies (1000 ));
1863+
18621864 return ;
18631865 }
1864- break ;
18651866 case MUSB_QUIRK_A_DISCONNECT_19 :
1867+ if (musb -> quirk_retries -- ) {
1868+ musb_dbg (musb ,
1869+ "Poll devctl on possible host mode disconnect" );
1870+ schedule_delayed_work (& musb -> irq_work ,
1871+ msecs_to_jiffies (1000 ));
1872+
1873+ return ;
1874+ }
18661875 if (!musb -> session )
18671876 break ;
18681877 musb_dbg (musb , "Allow PM on possible host mode disconnect" );
@@ -1886,9 +1895,9 @@ static void musb_pm_runtime_check_session(struct musb *musb)
18861895 if (error < 0 )
18871896 dev_err (musb -> controller , "Could not enable: %i\n" ,
18881897 error );
1898+ musb -> quirk_retries = 3 ;
18891899 } else {
18901900 musb_dbg (musb , "Allow PM with no session: %02x" , devctl );
1891- musb -> quirk_invalid_vbus = false;
18921901 pm_runtime_mark_last_busy (musb -> controller );
18931902 pm_runtime_put_autosuspend (musb -> controller );
18941903 }
@@ -1899,7 +1908,7 @@ static void musb_pm_runtime_check_session(struct musb *musb)
18991908/* Only used to provide driver mode change events */
19001909static void musb_irq_work (struct work_struct * data )
19011910{
1902- struct musb * musb = container_of (data , struct musb , irq_work );
1911+ struct musb * musb = container_of (data , struct musb , irq_work . work );
19031912
19041913 musb_pm_runtime_check_session (musb );
19051914
@@ -1969,6 +1978,7 @@ static struct musb *allocate_instance(struct device *dev,
19691978 INIT_LIST_HEAD (& musb -> control );
19701979 INIT_LIST_HEAD (& musb -> in_bulk );
19711980 INIT_LIST_HEAD (& musb -> out_bulk );
1981+ INIT_LIST_HEAD (& musb -> pending_list );
19721982
19731983 musb -> vbuserr_retry = VBUSERR_RETRY_COUNT ;
19741984 musb -> a_wait_bcon = OTG_TIME_A_WAIT_BCON ;
@@ -2018,6 +2028,84 @@ static void musb_free(struct musb *musb)
20182028 musb_host_free (musb );
20192029}
20202030
2031+ struct musb_pending_work {
2032+ int (* callback )(struct musb * musb , void * data );
2033+ void * data ;
2034+ struct list_head node ;
2035+ };
2036+
2037+ /*
2038+ * Called from musb_runtime_resume(), musb_resume(), and
2039+ * musb_queue_resume_work(). Callers must take musb->lock.
2040+ */
2041+ static int musb_run_resume_work (struct musb * musb )
2042+ {
2043+ struct musb_pending_work * w , * _w ;
2044+ unsigned long flags ;
2045+ int error = 0 ;
2046+
2047+ spin_lock_irqsave (& musb -> list_lock , flags );
2048+ list_for_each_entry_safe (w , _w , & musb -> pending_list , node ) {
2049+ if (w -> callback ) {
2050+ error = w -> callback (musb , w -> data );
2051+ if (error < 0 ) {
2052+ dev_err (musb -> controller ,
2053+ "resume callback %p failed: %i\n" ,
2054+ w -> callback , error );
2055+ }
2056+ }
2057+ list_del (& w -> node );
2058+ devm_kfree (musb -> controller , w );
2059+ }
2060+ spin_unlock_irqrestore (& musb -> list_lock , flags );
2061+
2062+ return error ;
2063+ }
2064+
2065+ /*
2066+ * Called to run work if device is active or else queue the work to happen
2067+ * on resume. Caller must take musb->lock and must hold an RPM reference.
2068+ *
2069+ * Note that we cowardly refuse queuing work after musb PM runtime
2070+ * resume is done calling musb_run_resume_work() and return -EINPROGRESS
2071+ * instead.
2072+ */
2073+ int musb_queue_resume_work (struct musb * musb ,
2074+ int (* callback )(struct musb * musb , void * data ),
2075+ void * data )
2076+ {
2077+ struct musb_pending_work * w ;
2078+ unsigned long flags ;
2079+ int error ;
2080+
2081+ if (WARN_ON (!callback ))
2082+ return - EINVAL ;
2083+
2084+ if (pm_runtime_active (musb -> controller ))
2085+ return callback (musb , data );
2086+
2087+ w = devm_kzalloc (musb -> controller , sizeof (* w ), GFP_ATOMIC );
2088+ if (!w )
2089+ return - ENOMEM ;
2090+
2091+ w -> callback = callback ;
2092+ w -> data = data ;
2093+ spin_lock_irqsave (& musb -> list_lock , flags );
2094+ if (musb -> is_runtime_suspended ) {
2095+ list_add_tail (& w -> node , & musb -> pending_list );
2096+ error = 0 ;
2097+ } else {
2098+ dev_err (musb -> controller , "could not add resume work %p\n" ,
2099+ callback );
2100+ devm_kfree (musb -> controller , w );
2101+ error = - EINPROGRESS ;
2102+ }
2103+ spin_unlock_irqrestore (& musb -> list_lock , flags );
2104+
2105+ return error ;
2106+ }
2107+ EXPORT_SYMBOL_GPL (musb_queue_resume_work );
2108+
20212109static void musb_deassert_reset (struct work_struct * work )
20222110{
20232111 struct musb * musb ;
@@ -2065,6 +2153,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
20652153 }
20662154
20672155 spin_lock_init (& musb -> lock );
2156+ spin_lock_init (& musb -> list_lock );
20682157 musb -> board_set_power = plat -> set_power ;
20692158 musb -> min_power = plat -> min_power ;
20702159 musb -> ops = plat -> platform_ops ;
@@ -2208,7 +2297,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
22082297 musb_generic_disable (musb );
22092298
22102299 /* Init IRQ workqueue before request_irq */
2211- INIT_WORK (& musb -> irq_work , musb_irq_work );
2300+ INIT_DELAYED_WORK (& musb -> irq_work , musb_irq_work );
22122301 INIT_DELAYED_WORK (& musb -> deassert_reset_work , musb_deassert_reset );
22132302 INIT_DELAYED_WORK (& musb -> finish_resume_work , musb_host_finish_resume );
22142303
@@ -2291,6 +2380,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
22912380 if (status )
22922381 goto fail5 ;
22932382
2383+ musb -> is_initialized = 1 ;
22942384 pm_runtime_mark_last_busy (musb -> controller );
22952385 pm_runtime_put_autosuspend (musb -> controller );
22962386
@@ -2304,7 +2394,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
23042394 musb_host_cleanup (musb );
23052395
23062396fail3 :
2307- cancel_work_sync (& musb -> irq_work );
2397+ cancel_delayed_work_sync (& musb -> irq_work );
23082398 cancel_delayed_work_sync (& musb -> finish_resume_work );
23092399 cancel_delayed_work_sync (& musb -> deassert_reset_work );
23102400 if (musb -> dma_controller )
@@ -2371,7 +2461,7 @@ static int musb_remove(struct platform_device *pdev)
23712461 */
23722462 musb_exit_debugfs (musb );
23732463
2374- cancel_work_sync (& musb -> irq_work );
2464+ cancel_delayed_work_sync (& musb -> irq_work );
23752465 cancel_delayed_work_sync (& musb -> finish_resume_work );
23762466 cancel_delayed_work_sync (& musb -> deassert_reset_work );
23772467 pm_runtime_get_sync (musb -> controller );
@@ -2557,6 +2647,7 @@ static int musb_suspend(struct device *dev)
25572647
25582648 musb_platform_disable (musb );
25592649 musb_generic_disable (musb );
2650+ WARN_ON (!list_empty (& musb -> pending_list ));
25602651
25612652 spin_lock_irqsave (& musb -> lock , flags );
25622653
@@ -2578,9 +2669,11 @@ static int musb_suspend(struct device *dev)
25782669
25792670static int musb_resume (struct device * dev )
25802671{
2581- struct musb * musb = dev_to_musb (dev );
2582- u8 devctl ;
2583- u8 mask ;
2672+ struct musb * musb = dev_to_musb (dev );
2673+ unsigned long flags ;
2674+ int error ;
2675+ u8 devctl ;
2676+ u8 mask ;
25842677
25852678 /*
25862679 * For static cmos like DaVinci, register values were preserved
@@ -2614,6 +2707,13 @@ static int musb_resume(struct device *dev)
26142707
26152708 musb_start (musb );
26162709
2710+ spin_lock_irqsave (& musb -> lock , flags );
2711+ error = musb_run_resume_work (musb );
2712+ if (error )
2713+ dev_err (musb -> controller , "resume work failed with %i\n" ,
2714+ error );
2715+ spin_unlock_irqrestore (& musb -> lock , flags );
2716+
26172717 return 0 ;
26182718}
26192719
@@ -2622,14 +2722,16 @@ static int musb_runtime_suspend(struct device *dev)
26222722 struct musb * musb = dev_to_musb (dev );
26232723
26242724 musb_save_context (musb );
2725+ musb -> is_runtime_suspended = 1 ;
26252726
26262727 return 0 ;
26272728}
26282729
26292730static int musb_runtime_resume (struct device * dev )
26302731{
2631- struct musb * musb = dev_to_musb (dev );
2632- static int first = 1 ;
2732+ struct musb * musb = dev_to_musb (dev );
2733+ unsigned long flags ;
2734+ int error ;
26332735
26342736 /*
26352737 * When pm_runtime_get_sync called for the first time in driver
@@ -2640,16 +2742,25 @@ static int musb_runtime_resume(struct device *dev)
26402742 * Also context restore without save does not make
26412743 * any sense
26422744 */
2643- if (!first )
2644- musb_restore_context (musb );
2645- first = 0 ;
2745+ if (!musb -> is_initialized )
2746+ return 0 ;
2747+
2748+ musb_restore_context (musb );
26462749
26472750 if (musb -> need_finish_resume ) {
26482751 musb -> need_finish_resume = 0 ;
26492752 schedule_delayed_work (& musb -> finish_resume_work ,
26502753 msecs_to_jiffies (USB_RESUME_TIMEOUT ));
26512754 }
26522755
2756+ spin_lock_irqsave (& musb -> lock , flags );
2757+ error = musb_run_resume_work (musb );
2758+ if (error )
2759+ dev_err (musb -> controller , "resume work failed with %i\n" ,
2760+ error );
2761+ musb -> is_runtime_suspended = 0 ;
2762+ spin_unlock_irqrestore (& musb -> lock , flags );
2763+
26532764 return 0 ;
26542765}
26552766