
Commit aba1de9

landlock: Fix formatting in tsync.c
Fix comment formatting in tsync.c to fit in 80 columns.

Cc: Günther Noack <gnoack@google.com>
Reviewed-by: Günther Noack <gnoack3000@gmail.com>
Link: https://lore.kernel.org/r/20260304193134.250495-4-mic@digikod.net
Signed-off-by: Mickaël Salaün <mic@digikod.net>
1 parent fa20aeb commit aba1de9

1 file changed: security/landlock/tsync.c
Lines changed: 58 additions & 49 deletions
@@ -85,12 +85,14 @@ static void restrict_one_thread(struct tsync_shared_context *ctx)
 		/*
 		 * Switch out old_cred with new_cred, if possible.
 		 *
-		 * In the common case, where all threads initially point to the same
-		 * struct cred, this optimization avoids creating separate redundant
-		 * credentials objects for each, which would all have the same contents.
+		 * In the common case, where all threads initially point to the
+		 * same struct cred, this optimization avoids creating separate
+		 * redundant credentials objects for each, which would all have
+		 * the same contents.
 		 *
-		 * Note: We are intentionally dropping the const qualifier here, because
-		 * it is required by commit_creds() and abort_creds().
+		 * Note: We are intentionally dropping the const qualifier
+		 * here, because it is required by commit_creds() and
+		 * abort_creds().
 		 */
 		cred = (struct cred *)get_cred(ctx->new_cred);
 	} else {
@@ -101,8 +103,8 @@ static void restrict_one_thread(struct tsync_shared_context *ctx)
 		atomic_set(&ctx->preparation_error, -ENOMEM);

 		/*
-		 * Even on error, we need to adhere to the protocol and coordinate
-		 * with concurrently running invocations.
+		 * Even on error, we need to adhere to the protocol and
+		 * coordinate with concurrently running invocations.
 		 */
 		if (atomic_dec_return(&ctx->num_preparing) == 0)
 			complete_all(&ctx->all_prepared);
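The point of this hunk is that even a failing thread still takes part in the rendezvous. A minimal sketch of that countdown barrier, using only the field names visible in the diff (the helper itself is illustrative):

/* Illustrative: finish the preparation phase, success or not. */
static void finish_preparing_sketch(struct tsync_shared_context *ctx, int err)
{
	if (err)
		/* Publish the error; siblings read it after the barrier. */
		atomic_set(&ctx->preparation_error, err);

	/* The last thread to arrive wakes everyone else up. */
	if (atomic_dec_return(&ctx->num_preparing) == 0)
		complete_all(&ctx->all_prepared);
}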
@@ -135,9 +137,9 @@ static void restrict_one_thread(struct tsync_shared_context *ctx)
 	}

 	/*
-	 * Make sure that all sibling tasks fulfill the no_new_privs prerequisite.
-	 * (This is in line with Seccomp's SECCOMP_FILTER_FLAG_TSYNC logic in
-	 * kernel/seccomp.c)
+	 * Make sure that all sibling tasks fulfill the no_new_privs
+	 * prerequisite. (This is in line with Seccomp's
+	 * SECCOMP_FILTER_FLAG_TSYNC logic in kernel/seccomp.c)
 	 */
 	if (ctx->set_no_new_privs)
 		task_set_no_new_privs(current);
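The prerequisite mirrors seccomp's SECCOMP_FILTER_FLAG_TSYNC rule. A hedged sketch of the flag handling (the -EPERM branch is an assumption for illustration; this diff only shows the set_no_new_privs case):

	if (ctx->set_no_new_privs) {
		/* The caller asked for no_new_privs on every sibling. */
		task_set_no_new_privs(current);
	} else if (!task_no_new_privs(current)) {
		/* Assumed error choice: a sibling lacking the flag aborts. */
		atomic_set(&ctx->preparation_error, -EPERM);
	}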
@@ -221,16 +223,17 @@ static void tsync_works_trim(struct tsync_works *s)
 	ctx = s->works[s->size - 1];

 	/*
-	 * For consistency, remove the task from ctx so that it does not look like
-	 * we handed it a task_work.
+	 * For consistency, remove the task from ctx so that it does not look
+	 * like we handed it a task_work.
 	 */
 	put_task_struct(ctx->task);
 	*ctx = (typeof(*ctx)){};

 	/*
-	 * Cancel the tsync_works_provide() change to recycle the reserved memory
-	 * for the next thread, if any. This also ensures that cancel_tsync_works()
-	 * and tsync_works_release() do not see any NULL task pointers.
+	 * Cancel the tsync_works_provide() change to recycle the reserved
+	 * memory for the next thread, if any. This also ensures that
+	 * cancel_tsync_works() and tsync_works_release() do not see any NULL
+	 * task pointers.
 	 */
 	s->size--;
 }
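tsync_works_trim() undoes the most recent reservation. Its counterpart can be sketched as follows, assuming a pointer array plus size/capacity counters and a hypothetical per-entry type name:

/* Hypothetical sketch of the reservation step that trim reverses. */
static struct tsync_work *tsync_works_provide_sketch(struct tsync_works *s,
						     struct task_struct *task)
{
	struct tsync_work *ctx;

	if (s->size == s->capacity)
		return NULL;	/* out of preallocated entries */

	ctx = s->works[s->size++];
	ctx->task = get_task_struct(task);	/* paired with put_task_struct() */
	return ctx;
}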
@@ -388,17 +391,17 @@ static bool schedule_task_work(struct tsync_works *works,
 			continue;

 		/*
-		 * We found a sibling thread that is not doing its task_work yet, and
-		 * which might spawn new threads before our task work runs, so we need
-		 * at least one more round in the outer loop.
+		 * We found a sibling thread that is not doing its task_work
+		 * yet, and which might spawn new threads before our task work
+		 * runs, so we need at least one more round in the outer loop.
 		 */
 		found_more_threads = true;

 		ctx = tsync_works_provide(works, thread);
 		if (!ctx) {
 			/*
-			 * We ran out of preallocated contexts -- we need to try again with
-			 * this thread at a later time!
+			 * We ran out of preallocated contexts -- we need to
+			 * try again with this thread at a later time!
 			 * found_more_threads is already true at this point.
 			 */
 			break;
@@ -413,10 +416,10 @@ static bool schedule_task_work(struct tsync_works *works,
 		err = task_work_add(thread, &ctx->work, TWA_SIGNAL);
 		if (unlikely(err)) {
 			/*
-			 * task_work_add() only fails if the task is about to exit. We
-			 * checked that earlier, but it can happen as a race. Resume
-			 * without setting an error, as the task is probably gone in the
-			 * next loop iteration.
+			 * task_work_add() only fails if the task is about to
+			 * exit. We checked that earlier, but it can happen as
+			 * a race. Resume without setting an error, as the
+			 * task is probably gone in the next loop iteration.
 			 */
 			tsync_works_trim(works);

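Taken together, the schedule_task_work() hunks describe a discovery-and-arming loop. A condensed, hedged sketch of its shape (skip conditions, the callback, and locking are simplified; names not present in the diff are assumptions):

/* Simplified sketch of schedule_task_work()'s inner loop. */
for_each_thread(current, thread) {
	if (thread == current)
		continue;	/* other skip conditions elided */

	/* This thread may still clone others: force one more outer round. */
	found_more_threads = true;

	ctx = tsync_works_provide(works, thread);
	if (!ctx)
		break;	/* no preallocated slot left; retry next round */

	init_task_work(&ctx->work, restrict_one_thread_cb);	/* assumed callback */
	if (unlikely(task_work_add(thread, &ctx->work, TWA_SIGNAL)))
		/* Only fails for an exiting task: recycle the slot, no error. */
		tsync_works_trim(works);
}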
@@ -507,24 +510,25 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
 	 *    After this barrier is reached, it's safe to read
 	 *    shared_ctx.preparation_error.
 	 *
-	 * 4) reads shared_ctx.preparation_error and then either does commit_creds()
-	 *    or abort_creds().
+	 * 4) reads shared_ctx.preparation_error and then either does
+	 *    commit_creds() or abort_creds().
 	 *
 	 * 5) signals that it's done altogether (barrier synchronization
 	 *    "all_finished")
 	 *
-	 * Unlike seccomp, which modifies sibling tasks directly, we do not need to
-	 * acquire the cred_guard_mutex and sighand->siglock:
+	 * Unlike seccomp, which modifies sibling tasks directly, we do not
+	 * need to acquire the cred_guard_mutex and sighand->siglock:
 	 *
-	 * - As in our case, all threads are themselves exchanging their own struct
-	 *   cred through the credentials API, no locks are needed for that.
+	 * - As in our case, all threads are themselves exchanging their own
+	 *   struct cred through the credentials API, no locks are needed for
+	 *   that.
 	 * - Our for_each_thread() loops are protected by RCU.
-	 * - We do not acquire a lock to keep the list of sibling threads stable
-	 *   between our for_each_thread loops. If the list of available sibling
-	 *   threads changes between these for_each_thread loops, we make up for
-	 *   that by continuing to look for threads until they are all discovered
-	 *   and have entered their task_work, where they are unable to spawn new
-	 *   threads.
+	 * - We do not acquire a lock to keep the list of sibling threads
+	 *   stable between our for_each_thread loops. If the list of
+	 *   available sibling threads changes between these for_each_thread
+	 *   loops, we make up for that by continuing to look for threads until
+	 *   they are all discovered and have entered their task_work, where
+	 *   they are unable to spawn new threads.
 	 */
 	do {
 		/* In RCU read-lock, count the threads we need. */
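To make the five numbered steps easier to follow, here is a hedged outline of the per-thread side, assembled from the names in this diff; the preparation step is reduced to prepare_creds() and error handling is simplified:

/* Sketch: the per-thread protocol; step numbers match the comment. */
static void restrict_one_thread_sketch(struct tsync_shared_context *ctx)
{
	struct cred *cred = prepare_creds();	/* 1) prepare (simplified) */

	if (!cred)
		atomic_set(&ctx->preparation_error, -ENOMEM);

	/* 2) signal that this thread is prepared ("all_prepared") */
	if (atomic_dec_return(&ctx->num_preparing) == 0)
		complete_all(&ctx->all_prepared);

	/* 3) wait for the green light; preparation_error is stable after this */
	wait_for_completion(&ctx->ready_to_commit);

	/* 4) commit or roll back, depending on the shared verdict */
	if (cred && !atomic_read(&ctx->preparation_error))
		commit_creds(cred);
	else if (cred)
		abort_creds(cred);

	/* 5) signal that this thread is done ("all_finished") */
	if (atomic_dec_return(&ctx->num_unfinished) == 0)
		complete_all(&ctx->all_finished);
}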
@@ -541,31 +545,36 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
 		}

 		/*
-		 * The "all_prepared" barrier is used locally to the loop body, this use
-		 * of for_each_thread(). We can reset it on each loop iteration because
-		 * all previous loop iterations are done with it already.
+		 * The "all_prepared" barrier is used locally to the loop body,
+		 * this use of for_each_thread(). We can reset it on each loop
+		 * iteration because all previous loop iterations are done with
+		 * it already.
 		 *
-		 * num_preparing is initialized to 1 so that the counter can not go to 0
-		 * and mark the completion as done before all task works are registered.
-		 * We decrement it at the end of the loop body.
+		 * num_preparing is initialized to 1 so that the counter can
+		 * not go to 0 and mark the completion as done before all task
+		 * works are registered. We decrement it at the end of the
+		 * loop body.
 		 */
 		atomic_set(&shared_ctx.num_preparing, 1);
 		reinit_completion(&shared_ctx.all_prepared);

 		/*
-		 * In RCU read-lock, schedule task work on newly discovered sibling
-		 * tasks.
+		 * In RCU read-lock, schedule task work on newly discovered
+		 * sibling tasks.
 		 */
 		found_more_threads = schedule_task_work(&works, &shared_ctx);

 		/*
-		 * Decrement num_preparing for current, to undo that we initialized it
-		 * to 1 a few lines above.
+		 * Decrement num_preparing for current, to undo that we
+		 * initialized it to 1 a few lines above.
 		 */
 		if (atomic_dec_return(&shared_ctx.num_preparing) > 0) {
 			if (wait_for_completion_interruptible(
 					&shared_ctx.all_prepared)) {
-				/* In case of interruption, we need to retry the system call. */
+				/*
+				 * In case of interruption, we need to retry
+				 * the system call.
+				 */
 				atomic_set(&shared_ctx.preparation_error,
 					   -ERESTARTNOINTR);

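On the orchestrating side, the comments in this hunk amount to roughly the following loop skeleton (hedged: the loop condition and the RCU sections around counting and scheduling are simplified from the diff context):

/* Sketch: the orchestrator's thread-discovery loop. */
do {
	/* Safe to reset: all previous rounds are done with this barrier. */
	atomic_set(&shared_ctx.num_preparing, 1);
	reinit_completion(&shared_ctx.all_prepared);

	found_more_threads = schedule_task_work(&works, &shared_ctx);

	/* Drop our own +1; if siblings remain, wait for their preparation. */
	if (atomic_dec_return(&shared_ctx.num_preparing) > 0) {
		if (wait_for_completion_interruptible(
				&shared_ctx.all_prepared))
			/* Interrupted: have every thread retry the syscall. */
			atomic_set(&shared_ctx.preparation_error,
				   -ERESTARTNOINTR);
	}
} while (found_more_threads);	/* assumed loop condition */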
@@ -598,8 +607,8 @@ int landlock_restrict_sibling_threads(const struct cred *old_cred,
 	complete_all(&shared_ctx.ready_to_commit);

 	/*
-	 * Decrement num_unfinished for current, to undo that we initialized it to 1
-	 * at the beginning.
+	 * Decrement num_unfinished for current, to undo that we initialized it
+	 * to 1 at the beginning.
 	 */
 	if (atomic_dec_return(&shared_ctx.num_unfinished) > 0)
 		wait_for_completion(&shared_ctx.all_finished);
