@@ -213,6 +213,7 @@ struct blkfront_info
213213 struct blk_mq_tag_set tag_set ;
214214 struct blkfront_ring_info * rinfo ;
215215 unsigned int nr_rings ;
216+ unsigned int rinfo_size ;
216217 /* Save uncomplete reqs and bios for migration. */
217218 struct list_head requests ;
218219 struct bio_list bio_list ;
@@ -259,6 +260,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
259260static void blkfront_gather_backend_features (struct blkfront_info * info );
260261static int negotiate_mq (struct blkfront_info * info );
261262
263+ #define for_each_rinfo (info , ptr , idx ) \
264+ for ((ptr) = (info)->rinfo, (idx) = 0; \
265+ (idx) < (info)->nr_rings; \
266+ (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
267+
268+ static inline struct blkfront_ring_info *
269+ get_rinfo (const struct blkfront_info * info , unsigned int i )
270+ {
271+ BUG_ON (i >= info -> nr_rings );
272+ return (void * )info -> rinfo + i * info -> rinfo_size ;
273+ }
274+
262275static int get_id_from_freelist (struct blkfront_ring_info * rinfo )
263276{
264277 unsigned long free = rinfo -> shadow_free ;
@@ -883,8 +896,7 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
883896 struct blkfront_info * info = hctx -> queue -> queuedata ;
884897 struct blkfront_ring_info * rinfo = NULL ;
885898
886- BUG_ON (info -> nr_rings <= qid );
887- rinfo = & info -> rinfo [qid ];
899+ rinfo = get_rinfo (info , qid );
888900 blk_mq_start_request (qd -> rq );
889901 spin_lock_irqsave (& rinfo -> ring_lock , flags );
890902 if (RING_FULL (& rinfo -> ring ))
@@ -1181,16 +1193,15 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
11811193static void xlvbd_release_gendisk (struct blkfront_info * info )
11821194{
11831195 unsigned int minor , nr_minors , i ;
1196+ struct blkfront_ring_info * rinfo ;
11841197
11851198 if (info -> rq == NULL )
11861199 return ;
11871200
11881201 /* No more blkif_request(). */
11891202 blk_mq_stop_hw_queues (info -> rq );
11901203
1191- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
1192- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
1193-
1204+ for_each_rinfo (info , rinfo , i ) {
11941205 /* No more gnttab callback work. */
11951206 gnttab_cancel_free_callback (& rinfo -> callback );
11961207
@@ -1339,6 +1350,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
13391350static void blkif_free (struct blkfront_info * info , int suspend )
13401351{
13411352 unsigned int i ;
1353+ struct blkfront_ring_info * rinfo ;
13421354
13431355 /* Prevent new requests being issued until we fix things up. */
13441356 info -> connected = suspend ?
@@ -1347,8 +1359,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
13471359 if (info -> rq )
13481360 blk_mq_stop_hw_queues (info -> rq );
13491361
1350- for ( i = 0 ; i < info -> nr_rings ; i ++ )
1351- blkif_free_ring (& info -> rinfo [ i ] );
1362+ for_each_rinfo ( info , rinfo , i )
1363+ blkif_free_ring (rinfo );
13521364
13531365 kvfree (info -> rinfo );
13541366 info -> rinfo = NULL ;
@@ -1775,6 +1787,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
17751787 int err ;
17761788 unsigned int i , max_page_order ;
17771789 unsigned int ring_page_order ;
1790+ struct blkfront_ring_info * rinfo ;
17781791
17791792 if (!info )
17801793 return - ENODEV ;
@@ -1788,9 +1801,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
17881801 if (err )
17891802 goto destroy_blkring ;
17901803
1791- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
1792- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
1793-
1804+ for_each_rinfo (info , rinfo , i ) {
17941805 /* Create shared ring, alloc event channel. */
17951806 err = setup_blkring (dev , rinfo );
17961807 if (err )
@@ -1815,7 +1826,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
18151826
18161827 /* We already got the number of queues/rings in _probe */
18171828 if (info -> nr_rings == 1 ) {
1818- err = write_per_ring_nodes (xbt , & info -> rinfo [ 0 ] , dev -> nodename );
1829+ err = write_per_ring_nodes (xbt , info -> rinfo , dev -> nodename );
18191830 if (err )
18201831 goto destroy_blkring ;
18211832 } else {
@@ -1837,10 +1848,10 @@ static int talk_to_blkback(struct xenbus_device *dev,
18371848 goto abort_transaction ;
18381849 }
18391850
1840- for ( i = 0 ; i < info -> nr_rings ; i ++ ) {
1851+ for_each_rinfo ( info , rinfo , i ) {
18411852 memset (path , 0 , pathsize );
18421853 snprintf (path , pathsize , "%s/queue-%u" , dev -> nodename , i );
1843- err = write_per_ring_nodes (xbt , & info -> rinfo [ i ] , path );
1854+ err = write_per_ring_nodes (xbt , rinfo , path );
18441855 if (err ) {
18451856 kfree (path );
18461857 goto destroy_blkring ;
@@ -1868,9 +1879,8 @@ static int talk_to_blkback(struct xenbus_device *dev,
18681879 goto destroy_blkring ;
18691880 }
18701881
1871- for ( i = 0 ; i < info -> nr_rings ; i ++ ) {
1882+ for_each_rinfo ( info , rinfo , i ) {
18721883 unsigned int j ;
1873- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
18741884
18751885 for (j = 0 ; j < BLK_RING_SIZE (info ); j ++ )
18761886 rinfo -> shadow [j ].req .u .rw .id = j + 1 ;
@@ -1900,6 +1910,7 @@ static int negotiate_mq(struct blkfront_info *info)
19001910{
19011911 unsigned int backend_max_queues ;
19021912 unsigned int i ;
1913+ struct blkfront_ring_info * rinfo ;
19031914
19041915 BUG_ON (info -> nr_rings );
19051916
@@ -1911,20 +1922,16 @@ static int negotiate_mq(struct blkfront_info *info)
19111922 if (!info -> nr_rings )
19121923 info -> nr_rings = 1 ;
19131924
1914- info -> rinfo = kvcalloc (info -> nr_rings ,
1915- struct_size (info -> rinfo , shadow ,
1916- BLK_RING_SIZE (info )),
1917- GFP_KERNEL );
1925+ info -> rinfo_size = struct_size (info -> rinfo , shadow ,
1926+ BLK_RING_SIZE (info ));
1927+ info -> rinfo = kvcalloc (info -> nr_rings , info -> rinfo_size , GFP_KERNEL );
19181928 if (!info -> rinfo ) {
19191929 xenbus_dev_fatal (info -> xbdev , - ENOMEM , "allocating ring_info structure" );
19201930 info -> nr_rings = 0 ;
19211931 return - ENOMEM ;
19221932 }
19231933
1924- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
1925- struct blkfront_ring_info * rinfo ;
1926-
1927- rinfo = & info -> rinfo [i ];
1934+ for_each_rinfo (info , rinfo , i ) {
19281935 INIT_LIST_HEAD (& rinfo -> indirect_pages );
19291936 INIT_LIST_HEAD (& rinfo -> grants );
19301937 rinfo -> dev_info = info ;
@@ -2017,16 +2024,15 @@ static int blkif_recover(struct blkfront_info *info)
20172024 int rc ;
20182025 struct bio * bio ;
20192026 unsigned int segs ;
2027+ struct blkfront_ring_info * rinfo ;
20202028
20212029 blkfront_gather_backend_features (info );
20222030 /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
20232031 blkif_set_queue_limits (info );
20242032 segs = info -> max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST ;
20252033 blk_queue_max_segments (info -> rq , segs / GRANTS_PER_PSEG );
20262034
2027- for (r_index = 0 ; r_index < info -> nr_rings ; r_index ++ ) {
2028- struct blkfront_ring_info * rinfo = & info -> rinfo [r_index ];
2029-
2035+ for_each_rinfo (info , rinfo , r_index ) {
20302036 rc = blkfront_setup_indirect (rinfo );
20312037 if (rc )
20322038 return rc ;
@@ -2036,10 +2042,7 @@ static int blkif_recover(struct blkfront_info *info)
20362042 /* Now safe for us to use the shared ring */
20372043 info -> connected = BLKIF_STATE_CONNECTED ;
20382044
2039- for (r_index = 0 ; r_index < info -> nr_rings ; r_index ++ ) {
2040- struct blkfront_ring_info * rinfo ;
2041-
2042- rinfo = & info -> rinfo [r_index ];
2045+ for_each_rinfo (info , rinfo , r_index ) {
20432046 /* Kick any other new requests queued since we resumed */
20442047 kick_pending_request_queues (rinfo );
20452048 }
@@ -2072,13 +2075,13 @@ static int blkfront_resume(struct xenbus_device *dev)
20722075 struct blkfront_info * info = dev_get_drvdata (& dev -> dev );
20732076 int err = 0 ;
20742077 unsigned int i , j ;
2078+ struct blkfront_ring_info * rinfo ;
20752079
20762080 dev_dbg (& dev -> dev , "blkfront_resume: %s\n" , dev -> nodename );
20772081
20782082 bio_list_init (& info -> bio_list );
20792083 INIT_LIST_HEAD (& info -> requests );
2080- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
2081- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
2084+ for_each_rinfo (info , rinfo , i ) {
20822085 struct bio_list merge_bio ;
20832086 struct blk_shadow * shadow = rinfo -> shadow ;
20842087
@@ -2337,6 +2340,7 @@ static void blkfront_connect(struct blkfront_info *info)
23372340 unsigned int binfo ;
23382341 char * envp [] = { "RESIZE=1" , NULL };
23392342 int err , i ;
2343+ struct blkfront_ring_info * rinfo ;
23402344
23412345 switch (info -> connected ) {
23422346 case BLKIF_STATE_CONNECTED :
@@ -2394,8 +2398,8 @@ static void blkfront_connect(struct blkfront_info *info)
23942398 "physical-sector-size" ,
23952399 sector_size );
23962400 blkfront_gather_backend_features (info );
2397- for ( i = 0 ; i < info -> nr_rings ; i ++ ) {
2398- err = blkfront_setup_indirect (& info -> rinfo [ i ] );
2401+ for_each_rinfo ( info , rinfo , i ) {
2402+ err = blkfront_setup_indirect (rinfo );
23992403 if (err ) {
24002404 xenbus_dev_fatal (info -> xbdev , err , "setup_indirect at %s" ,
24012405 info -> xbdev -> otherend );
@@ -2416,8 +2420,8 @@ static void blkfront_connect(struct blkfront_info *info)
24162420
24172421 /* Kick pending requests. */
24182422 info -> connected = BLKIF_STATE_CONNECTED ;
2419- for ( i = 0 ; i < info -> nr_rings ; i ++ )
2420- kick_pending_request_queues (& info -> rinfo [ i ] );
2423+ for_each_rinfo ( info , rinfo , i )
2424+ kick_pending_request_queues (rinfo );
24212425
24222426 device_add_disk (& info -> xbdev -> dev , info -> gd , NULL );
24232427
@@ -2652,9 +2656,9 @@ static void purge_persistent_grants(struct blkfront_info *info)
26522656{
26532657 unsigned int i ;
26542658 unsigned long flags ;
2659+ struct blkfront_ring_info * rinfo ;
26552660
2656- for (i = 0 ; i < info -> nr_rings ; i ++ ) {
2657- struct blkfront_ring_info * rinfo = & info -> rinfo [i ];
2661+ for_each_rinfo (info , rinfo , i ) {
26582662 struct grant * gnt_list_entry , * tmp ;
26592663
26602664 spin_lock_irqsave (& rinfo -> ring_lock , flags );