@@ -383,9 +383,6 @@ xfs_zone_gc_iter_irec(
383383 struct xfs_rmap_irec * irec ;
384384 int error ;
385385
386- if (!iter -> victim_rtg )
387- return false;
388-
389386retry :
390387 if (iter -> rec_idx == iter -> rec_count ) {
391388 error = xfs_zone_gc_query (mp , iter );
@@ -555,7 +552,7 @@ xfs_zone_gc_steal_open(
555552/*
556553 * Ensure we have a valid open zone to write to.
557554 */
558- static struct xfs_open_zone *
555+ static bool
559556xfs_zone_gc_select_target (
560557 struct xfs_mount * mp )
561558{
@@ -568,14 +565,14 @@ xfs_zone_gc_select_target(
568565 * zone.
569566 */
570567 if (oz -> oz_allocated < rtg_blocks (oz -> oz_rtg ))
571- return oz ;
568+ return true ;
572569
573570 /*
574571 * Wait for all writes to the current zone to finish before
575572 * picking a new one.
576573 */
577574 if (oz -> oz_written < rtg_blocks (oz -> oz_rtg ))
578- return NULL ;
575+ return false ;
579576 }
580577
581578 /*
@@ -589,7 +586,7 @@ xfs_zone_gc_select_target(
589586 spin_lock (& zi -> zi_open_zones_lock );
590587 zi -> zi_open_gc_zone = oz ;
591588 spin_unlock (& zi -> zi_open_zones_lock );
592- return oz ;
589+ return !! oz ;
593590}
594591
595592static void
@@ -604,19 +601,15 @@ xfs_zone_gc_end_io(
604601 wake_up_process (data -> mp -> m_zone_info -> zi_gc_thread );
605602}
606603
607- static struct xfs_open_zone *
604+ static bool
608605xfs_zone_gc_alloc_blocks (
609606 struct xfs_zone_gc_data * data ,
610607 xfs_extlen_t * count_fsb ,
611608 xfs_daddr_t * daddr ,
612609 bool * is_seq )
613610{
614611 struct xfs_mount * mp = data -> mp ;
615- struct xfs_open_zone * oz ;
616-
617- oz = xfs_zone_gc_select_target (mp );
618- if (!oz )
619- return NULL ;
612+ struct xfs_open_zone * oz = mp -> m_zone_info -> zi_open_gc_zone ;
620613
621614 * count_fsb = min (* count_fsb , XFS_B_TO_FSB (mp , data -> scratch_available ));
622615
@@ -638,15 +631,15 @@ xfs_zone_gc_alloc_blocks(
638631 spin_unlock (& mp -> m_sb_lock );
639632
640633 if (!* count_fsb )
641- return NULL ;
634+ return false ;
642635
643636 * daddr = xfs_gbno_to_daddr (rtg_group (oz -> oz_rtg ), 0 );
644637 * is_seq = bdev_zone_is_seq (mp -> m_rtdev_targp -> bt_bdev , * daddr );
645638 if (!* is_seq )
646639 * daddr += XFS_FSB_TO_BB (mp , oz -> oz_allocated );
647640 oz -> oz_allocated += * count_fsb ;
648641 atomic_inc (& oz -> oz_ref );
649- return oz ;
642+ return true ;
650643}
651644
652645static void
@@ -671,14 +664,35 @@ xfs_zone_gc_add_data(
671664 } while (len );
672665}
673666
667+ static bool
668+ xfs_zone_gc_can_start_chunk (
669+ struct xfs_zone_gc_data * data )
670+ {
671+
672+ if (xfs_is_shutdown (data -> mp ))
673+ return false;
674+ if (!data -> scratch_available )
675+ return false;
676+
677+ if (!data -> iter .victim_rtg ) {
678+ if (kthread_should_stop () || kthread_should_park ())
679+ return false;
680+ if (!xfs_zoned_need_gc (data -> mp ))
681+ return false;
682+ if (!xfs_zone_gc_select_victim (data ))
683+ return false;
684+ }
685+
686+ return xfs_zone_gc_select_target (data -> mp );
687+ }
688+
674689static bool
675690xfs_zone_gc_start_chunk (
676691 struct xfs_zone_gc_data * data )
677692{
678693 struct xfs_zone_gc_iter * iter = & data -> iter ;
679694 struct xfs_mount * mp = data -> mp ;
680695 struct block_device * bdev = mp -> m_rtdev_targp -> bt_bdev ;
681- struct xfs_open_zone * oz ;
682696 struct xfs_rmap_irec irec ;
683697 struct xfs_gc_bio * chunk ;
684698 struct xfs_inode * ip ;
@@ -687,14 +701,15 @@ xfs_zone_gc_start_chunk(
687701 unsigned int len ;
688702 bool is_seq ;
689703
690- if (xfs_is_shutdown ( mp ))
704+ if (! xfs_zone_gc_can_start_chunk ( data ))
691705 return false;
692706
707+ set_current_state (TASK_RUNNING );
693708 if (!xfs_zone_gc_iter_irec (mp , iter , & irec , & ip ))
694709 return false;
695- oz = xfs_zone_gc_alloc_blocks ( data , & irec . rm_blockcount , & daddr ,
696- & is_seq );
697- if (! oz ) {
710+
711+ if (! xfs_zone_gc_alloc_blocks ( data , & irec . rm_blockcount , & daddr ,
712+ & is_seq ) ) {
698713 xfs_irele (ip );
699714 return false;
700715 }
@@ -713,7 +728,7 @@ xfs_zone_gc_start_chunk(
713728 chunk -> new_daddr = daddr ;
714729 chunk -> is_seq = is_seq ;
715730 chunk -> data = data ;
716- chunk -> oz = oz ;
731+ chunk -> oz = mp -> m_zone_info -> zi_open_gc_zone ;
717732 chunk -> victim_rtg = iter -> victim_rtg ;
718733 atomic_inc (& rtg_group (chunk -> victim_rtg )-> xg_active_ref );
719734 atomic_inc (& chunk -> victim_rtg -> rtg_gccount );
@@ -1007,33 +1022,6 @@ xfs_zone_gc_reset_zones(
10071022 } while (next );
10081023}
10091024
1010- static bool
1011- xfs_zone_gc_should_start_new_work (
1012- struct xfs_zone_gc_data * data )
1013- {
1014- struct xfs_open_zone * oz ;
1015-
1016- if (xfs_is_shutdown (data -> mp ))
1017- return false;
1018- if (!data -> scratch_available )
1019- return false;
1020-
1021- oz = xfs_zone_gc_select_target (data -> mp );
1022- if (!oz || oz -> oz_allocated == rtg_blocks (oz -> oz_rtg ))
1023- return false;
1024-
1025- if (!data -> iter .victim_rtg ) {
1026- if (kthread_should_stop () || kthread_should_park ())
1027- return false;
1028- if (!xfs_zoned_need_gc (data -> mp ))
1029- return false;
1030- if (!xfs_zone_gc_select_victim (data ))
1031- return false;
1032- }
1033-
1034- return true;
1035- }
1036-
10371025/*
10381026 * Handle the work to read and write data for GC and to reset the zones,
10391027 * including handling all completions.
@@ -1083,13 +1071,10 @@ xfs_zone_gc_handle_work(
10831071 }
10841072 blk_finish_plug (& plug );
10851073
1086- if (xfs_zone_gc_should_start_new_work (data )) {
1087- set_current_state (TASK_RUNNING );
1088- blk_start_plug (& plug );
1089- while (xfs_zone_gc_start_chunk (data ))
1090- ;
1091- blk_finish_plug (& plug );
1092- }
1074+ blk_start_plug (& plug );
1075+ while (xfs_zone_gc_start_chunk (data ))
1076+ ;
1077+ blk_finish_plug (& plug );
10931078}
10941079
10951080/*