@@ -21,7 +21,7 @@ static void rcu_exp_gp_seq_start(void)
 }
 
 /*
- * Return then value that expedited-grace-period counter will have
+ * Return the value that the expedited-grace-period counter will have
  * at the end of the current grace period.
  */
 static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
@@ -39,7 +39,9 @@ static void rcu_exp_gp_seq_end(void)
 }
 
 /*
- * Take a snapshot of the expedited-grace-period counter.
+ * Take a snapshot of the expedited-grace-period counter, which is the
+ * earliest value that will indicate that a full grace period has
+ * elapsed since the current time.
  */
 static unsigned long rcu_exp_gp_seq_snap(void)
 {
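The expanded comment above pins down what the snapshot means: the earliest counter value that will indicate a full grace period has elapsed since the snapshot was taken. Below is a minimal user-space sketch of that counter discipline only, not the kernel code: exp_seq and the exp_seq_*() helpers are hypothetical stand-ins, and the kernel's real sequence helpers reserve more low-order state bits and add memory ordering.

/* Hypothetical model: the low bit is set while a grace period runs. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long exp_seq;			/* stand-in for the expedited counter */

static void exp_seq_start(void) { exp_seq++; }	/* grace period starts: count goes odd  */
static void exp_seq_end(void)   { exp_seq++; }	/* grace period ends:   count goes even */

/* Earliest counter value that proves a full grace period since "now". */
static unsigned long exp_seq_snap(void)
{
	return (exp_seq + 3) & ~0x1UL;
}

/* Has the grace period named by snapshot s completed?  (Wraparound ignored.) */
static bool exp_seq_done(unsigned long s)
{
	return exp_seq >= s;
}

int main(void)
{
	unsigned long s = exp_seq_snap();	/* counter idle at 0 -> snapshot is 2 */

	printf("done before any GP: %d\n", exp_seq_done(s));	/* prints 0 */
	exp_seq_start();
	exp_seq_end();
	printf("done after one GP:  %d\n", exp_seq_done(s));	/* prints 1 */
	return 0;
}

The "+ 3" matters because a grace period already in progress at snapshot time cannot be counted on, so the snapshot names the end of the next full grace period after the current one.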
@@ -143,22 +145,18 @@ static void __maybe_unused sync_exp_reset_tree(void)
  * Return non-zero if there is no RCU expedited grace period in progress
  * for the specified rcu_node structure, in other words, if all CPUs and
  * tasks covered by the specified rcu_node structure have done their bit
- * for the current expedited grace period. Works only for preemptible
- * RCU -- other RCU implementation use other means.
- *
- * Caller must hold the specificed rcu_node structure's ->lock
+ * for the current expedited grace period.
  */
 static bool sync_rcu_exp_done(struct rcu_node *rnp)
 {
 	raw_lockdep_assert_held_rcu_node(rnp);
-
 	return rnp->exp_tasks == NULL &&
 	       READ_ONCE(rnp->expmask) == 0;
 }
 
 /*
- * Like sync_rcu_exp_done(), but this function assumes the caller doesn't
- * hold the rcu_node's ->lock, and will acquire and release the lock itself
+ * Like sync_rcu_exp_done(), but where the caller does not hold the
+ * rcu_node's ->lock.
  */
 static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
 {
@@ -180,15 +178,14 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
  * which the task was queued or to one of that rcu_node structure's ancestors,
  * recursively up the tree. (Calm down, calm down, we do the recursion
  * iteratively!)
- *
- * Caller must hold the specified rcu_node structure's ->lock.
  */
 static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 				 bool wake, unsigned long flags)
 	__releases(rnp->lock)
 {
 	unsigned long mask;
 
+	raw_lockdep_assert_held_rcu_node(rnp);
 	for (;;) {
 		if (!sync_rcu_exp_done(rnp)) {
 			if (!rnp->expmask)
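The two hunks above replace "Caller must hold ... ->lock" comments with raw_lockdep_assert_held_rcu_node() calls, so the locking rule is checked at runtime on lockdep-enabled kernels instead of only being documented. Here is a rough user-space analogue of that pattern, with made-up names (struct node, node_assert_held(), node_exp_done*()) standing in for rcu_node and the kernel helpers; build with -pthread.

/* Hypothetical sketch: assert the lock is held rather than documenting it. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct node {				/* made-up stand-in for rcu_node */
	pthread_mutex_t lock;
	bool held;			/* poor man's lockdep ownership flag */
	unsigned long expmask;
	void *exp_tasks;
};

/* Analogue of raw_lockdep_assert_held_rcu_node(): fail loudly if unlocked. */
static void node_assert_held(struct node *np)
{
	assert(np->held);
}

/* Analogue of sync_rcu_exp_done(): requires the lock, and now enforces it. */
static bool node_exp_done(struct node *np)
{
	node_assert_held(np);
	return np->exp_tasks == NULL && np->expmask == 0;
}

/* Analogue of sync_rcu_exp_done_unlocked(): acquires and releases the lock. */
static bool node_exp_done_unlocked(struct node *np)
{
	bool ret;

	pthread_mutex_lock(&np->lock);
	np->held = true;
	ret = node_exp_done(np);
	np->held = false;
	pthread_mutex_unlock(&np->lock);
	return ret;
}

int main(void)
{
	struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER };

	return node_exp_done_unlocked(&n) ? 0 : 1;
}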
@@ -452,6 +449,10 @@ static void sync_rcu_exp_select_cpus(void)
 	flush_work(&rnp->rew.rew_work);
 }
 
+/*
+ * Wait for the expedited grace period to elapse, issuing any needed
+ * RCU CPU stall warnings along the way.
+ */
 static void synchronize_sched_expedited_wait(void)
 {
 	int cpu;
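The comment added above describes synchronize_sched_expedited_wait() as the wait loop that also emits RCU CPU stall warnings. The sketch below shows only that shape, wait with a timeout and complain if it expires, using hypothetical names (wait_for_gp(), end_gp(), STALL_TIMEOUT_SEC) and a pthread condition variable rather than the kernel's wait queues and stall-dump machinery; build with -pthread.

/* Hypothetical "wait, but warn if it takes too long" loop. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define STALL_TIMEOUT_SEC 21		/* made-up stall threshold */

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;
static bool gp_done;			/* set once the "grace period" ends */

/* Wait for gp_done, nagging on stderr each time the timeout expires. */
static void wait_for_gp(void)
{
	struct timespec deadline;

	pthread_mutex_lock(&wait_lock);
	while (!gp_done) {
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += STALL_TIMEOUT_SEC;
		if (pthread_cond_timedwait(&wait_cv, &wait_lock, &deadline) &&
		    !gp_done)
			fprintf(stderr, "stall warning: grace period still running\n");
	}
	pthread_mutex_unlock(&wait_lock);
}

/* In real life some other thread ends the grace period. */
static void end_gp(void)
{
	pthread_mutex_lock(&wait_lock);
	gp_done = true;
	pthread_cond_broadcast(&wait_cv);
	pthread_mutex_unlock(&wait_lock);
}

int main(void)
{
	end_gp();			/* pre-complete so the demo returns immediately */
	wait_for_gp();
	return 0;
}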
@@ -781,7 +782,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * implementations, it is still unfriendly to real-time workloads, so is
  * thus not recommended for any sort of common-case code. In fact, if
  * you are using synchronize_rcu_expedited() in a loop, please restructure
- * your code to batch your updates, and then Use a single synchronize_rcu()
+ * your code to batch your updates, and then use a single synchronize_rcu()
  * instead.
  *
  * This has the same semantics as (but is more brutal than) synchronize_rcu().
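The advice in the last hunk, batch your updates and then use a single synchronize_rcu(), can be illustrated outside the kernel with userspace RCU (liburcu), assuming its rcu_register_thread()/synchronize_rcu() API; struct item and the list handling are made up for the example, and the link flag (typically -lurcu) may differ by liburcu flavor.

/* Hedged sketch: one grace-period wait covering a whole batch of removals. */
#include <urcu.h>		/* userspace RCU; link with -lurcu (flavor-dependent) */
#include <stdlib.h>

struct item {			/* made-up element type */
	int value;
	struct item *next;
};

int main(void)
{
	struct item *head = NULL, *p, *next;
	int i;

	rcu_register_thread();

	/* Build a small singly linked list. */
	for (i = 0; i < 3; i++) {
		p = malloc(sizeof(*p));
		p->value = i;
		p->next = head;
		rcu_assign_pointer(head, p);
	}

	/* Batched update: unlink every element first... */
	p = head;
	rcu_assign_pointer(head, NULL);

	/*
	 * ...then wait for a single grace period covering all the removals.
	 * The anti-pattern the comment warns about would wait once per
	 * element inside the loop below instead.
	 */
	synchronize_rcu();

	for (; p; p = next) {
		next = p->next;
		free(p);
	}

	rcu_unregister_thread();
	return 0;
}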