
Commit eff8d16

Christoph Hellwig authored and axboe committed
zloop: refactor zloop_rw
Split out two helper functions to make the function more readable and
to avoid conditional locking.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://patch.msgid.link/20260323071156.2940772-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent: 67807fb

1 file changed: drivers/block/zloop.c

Lines changed: 124 additions & 116 deletions
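The refactor described in the commit message is a general pattern, so here is a minimal userspace sketch of it before the diff itself. This is not zloop code: the pthread mutex, the do_work()/do_locked_work() helpers, and the process_*() callers are all invented for illustration. It contrasts conditional locking, where every exit path must know whether the lock is currently held, with splitting the locked work into a helper whose lock/unlock calls are unconditional and paired.

/* Minimal, self-contained analogue of the refactor; not zloop code.
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_counter;

/*
 * Before: conditional locking. Every error path has to remember
 * whether the lock is held, which is the readability problem the
 * commit message calls out.
 */
static int process_conditional(bool needs_lock, int value)
{
        int ret = 0;

        if (needs_lock)
                pthread_mutex_lock(&lock);

        if (value < 0) {
                ret = -1;
                goto unlock;    /* must not forget the conditional unlock */
        }
        shared_counter += value;

unlock:
        if (needs_lock)
                pthread_mutex_unlock(&lock);
        return ret;
}

/*
 * After: the locked work lives in a helper with unconditional,
 * symmetric lock/unlock, and the caller decides which helper to call.
 */
static int do_work(int value)
{
        if (value < 0)
                return -1;
        shared_counter += value;
        return 0;
}

static int do_locked_work(int value)
{
        int ret;

        pthread_mutex_lock(&lock);
        ret = do_work(value);
        pthread_mutex_unlock(&lock);
        return ret;
}

static int process_split(bool needs_lock, int value)
{
        return needs_lock ? do_locked_work(value) : do_work(value);
}

int main(void)
{
        printf("conditional: %d\n", process_conditional(true, 2));
        printf("split:       %d\n", process_split(true, 3));
        printf("counter:     %d\n", shared_counter);
        return 0;
}

In the commit itself the same idea shows up as zloop_rw() taking zone->lock unconditionally around zloop_seq_write_prep() and zloop_do_rw() for sequential-zone writes, and calling zloop_do_rw() without the lock for everything else.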
@@ -378,125 +378,22 @@ static void zloop_rw_complete(struct kiocb *iocb, long ret)
 	zloop_put_cmd(cmd);
 }
 
-static void zloop_rw(struct zloop_cmd *cmd)
+static int zloop_do_rw(struct zloop_cmd *cmd)
 {
 	struct request *rq = blk_mq_rq_from_pdu(cmd);
+	int rw = req_op(rq) == REQ_OP_READ ? ITER_DEST : ITER_SOURCE;
+	unsigned int nr_bvec = blk_rq_nr_bvec(rq);
 	struct zloop_device *zlo = rq->q->queuedata;
-	unsigned int zone_no = rq_zone_no(rq);
-	sector_t sector = blk_rq_pos(rq);
-	sector_t nr_sectors = blk_rq_sectors(rq);
-	bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
-	bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
-	int rw = is_write ? ITER_SOURCE : ITER_DEST;
+	struct zloop_zone *zone = &zlo->zones[rq_zone_no(rq)];
 	struct req_iterator rq_iter;
-	struct zloop_zone *zone;
 	struct iov_iter iter;
-	struct bio_vec tmp;
-	unsigned long flags;
-	sector_t zone_end;
-	unsigned int nr_bvec;
-	int ret;
-
-	atomic_set(&cmd->ref, 2);
-	cmd->sector = sector;
-	cmd->nr_sectors = nr_sectors;
-	cmd->ret = 0;
-
-	if (WARN_ON_ONCE(is_append && !zlo->zone_append)) {
-		ret = -EIO;
-		goto out;
-	}
-
-	/* We should never get an I/O beyond the device capacity. */
-	if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) {
-		ret = -EIO;
-		goto out;
-	}
-	zone = &zlo->zones[zone_no];
-	zone_end = zone->start + zlo->zone_capacity;
-
-	/*
-	 * The block layer should never send requests that are not fully
-	 * contained within the zone.
-	 */
-	if (WARN_ON_ONCE(sector + nr_sectors > zone->start + zlo->zone_size)) {
-		ret = -EIO;
-		goto out;
-	}
-
-	if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
-		mutex_lock(&zone->lock);
-		ret = zloop_update_seq_zone(zlo, zone_no);
-		mutex_unlock(&zone->lock);
-		if (ret)
-			goto out;
-	}
-
-	if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
-		mutex_lock(&zone->lock);
-
-		spin_lock_irqsave(&zone->wp_lock, flags);
-
-		/*
-		 * Zone append operations always go at the current write
-		 * pointer, but regular write operations must already be
-		 * aligned to the write pointer when submitted.
-		 */
-		if (is_append) {
-			/*
-			 * If ordered zone append is in use, we already checked
-			 * and set the target sector in zloop_queue_rq().
-			 */
-			if (!zlo->ordered_zone_append) {
-				if (zone->cond == BLK_ZONE_COND_FULL ||
-				    zone->wp + nr_sectors > zone_end) {
-					spin_unlock_irqrestore(&zone->wp_lock,
-							       flags);
-					ret = -EIO;
-					goto unlock;
-				}
-				sector = zone->wp;
-			}
-			cmd->sector = sector;
-		} else if (sector != zone->wp) {
-			spin_unlock_irqrestore(&zone->wp_lock, flags);
-			pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
-			       zone_no, sector, zone->wp);
-			ret = -EIO;
-			goto unlock;
-		}
-
-		/* Implicitly open the target zone. */
-		if (zone->cond == BLK_ZONE_COND_CLOSED ||
-		    zone->cond == BLK_ZONE_COND_EMPTY)
-			zone->cond = BLK_ZONE_COND_IMP_OPEN;
-
-		/*
-		 * Advance the write pointer, unless ordered zone append is in
-		 * use. If the write fails, the write pointer position will be
-		 * corrected when the next I/O starts execution.
-		 */
-		if (!is_append || !zlo->ordered_zone_append) {
-			zone->wp += nr_sectors;
-			if (zone->wp == zone_end) {
-				zone->cond = BLK_ZONE_COND_FULL;
-				zone->wp = ULLONG_MAX;
-			}
-		}
-
-		spin_unlock_irqrestore(&zone->wp_lock, flags);
-	}
-
-	nr_bvec = blk_rq_nr_bvec(rq);
 
 	if (rq->bio != rq->biotail) {
-		struct bio_vec *bvec;
+		struct bio_vec tmp, *bvec;
 
 		cmd->bvec = kmalloc_objs(*cmd->bvec, nr_bvec, GFP_NOIO);
-		if (!cmd->bvec) {
-			ret = -EIO;
-			goto unlock;
-		}
+		if (!cmd->bvec)
+			return -EIO;
 
 		/*
 		 * The bios of the request may be started from the middle of
@@ -522,20 +419,131 @@ static void zloop_rw(struct zloop_cmd *cmd)
 		iter.iov_offset = rq->bio->bi_iter.bi_bvec_done;
 	}
 
-	cmd->iocb.ki_pos = (sector - zone->start) << SECTOR_SHIFT;
+	cmd->iocb.ki_pos = (cmd->sector - zone->start) << SECTOR_SHIFT;
 	cmd->iocb.ki_filp = zone->file;
 	cmd->iocb.ki_complete = zloop_rw_complete;
 	if (!zlo->buffered_io)
 		cmd->iocb.ki_flags = IOCB_DIRECT;
 	cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
 
 	if (rw == ITER_SOURCE)
-		ret = zone->file->f_op->write_iter(&cmd->iocb, &iter);
-	else
-		ret = zone->file->f_op->read_iter(&cmd->iocb, &iter);
-unlock:
-	if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write)
+		return zone->file->f_op->write_iter(&cmd->iocb, &iter);
+	return zone->file->f_op->read_iter(&cmd->iocb, &iter);
+}
+
+static int zloop_seq_write_prep(struct zloop_cmd *cmd)
+{
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
+	struct zloop_device *zlo = rq->q->queuedata;
+	unsigned int zone_no = rq_zone_no(rq);
+	sector_t nr_sectors = blk_rq_sectors(rq);
+	bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
+	struct zloop_zone *zone = &zlo->zones[zone_no];
+	sector_t zone_end = zone->start + zlo->zone_capacity;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&zone->wp_lock, flags);
+
+	/*
+	 * Zone append operations always go at the current write pointer, but
+	 * regular write operations must already be aligned to the write pointer
+	 * when submitted.
+	 */
+	if (is_append) {
+		/*
+		 * If ordered zone append is in use, we already checked and set
+		 * the target sector in zloop_queue_rq().
+		 */
+		if (!zlo->ordered_zone_append) {
+			if (zone->cond == BLK_ZONE_COND_FULL ||
+			    zone->wp + nr_sectors > zone_end) {
+				ret = -EIO;
+				goto out_unlock;
+			}
+			cmd->sector = zone->wp;
+		}
+	} else {
+		if (cmd->sector != zone->wp) {
+			pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
+			       zone_no, cmd->sector, zone->wp);
+			ret = -EIO;
+			goto out_unlock;
+		}
+	}
+
+	/* Implicitly open the target zone. */
+	if (zone->cond == BLK_ZONE_COND_CLOSED ||
+	    zone->cond == BLK_ZONE_COND_EMPTY)
+		zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+	/*
+	 * Advance the write pointer, unless ordered zone append is in use. If
+	 * the write fails, the write pointer position will be corrected when
+	 * the next I/O starts execution.
+	 */
+	if (!is_append || !zlo->ordered_zone_append) {
+		zone->wp += nr_sectors;
+		if (zone->wp == zone_end) {
+			zone->cond = BLK_ZONE_COND_FULL;
+			zone->wp = ULLONG_MAX;
+		}
+	}
+out_unlock:
+	spin_unlock_irqrestore(&zone->wp_lock, flags);
+	return ret;
+}
+
+static void zloop_rw(struct zloop_cmd *cmd)
+{
+	struct request *rq = blk_mq_rq_from_pdu(cmd);
+	struct zloop_device *zlo = rq->q->queuedata;
+	unsigned int zone_no = rq_zone_no(rq);
+	sector_t nr_sectors = blk_rq_sectors(rq);
+	bool is_append = req_op(rq) == REQ_OP_ZONE_APPEND;
+	bool is_write = req_op(rq) == REQ_OP_WRITE || is_append;
+	struct zloop_zone *zone;
+	int ret = -EIO;
+
+	atomic_set(&cmd->ref, 2);
+	cmd->sector = blk_rq_pos(rq);
+	cmd->nr_sectors = nr_sectors;
+	cmd->ret = 0;
+
+	if (WARN_ON_ONCE(is_append && !zlo->zone_append))
+		goto out;
+
+	/* We should never get an I/O beyond the device capacity. */
+	if (WARN_ON_ONCE(zone_no >= zlo->nr_zones))
+		goto out;
+
+	zone = &zlo->zones[zone_no];
+
+	/*
+	 * The block layer should never send requests that are not fully
+	 * contained within the zone.
+	 */
+	if (WARN_ON_ONCE(cmd->sector + nr_sectors >
+			 zone->start + zlo->zone_size))
+		goto out;
+
+	if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
+		mutex_lock(&zone->lock);
+		ret = zloop_update_seq_zone(zlo, zone_no);
 		mutex_unlock(&zone->lock);
+		if (ret)
+			goto out;
+	}
+
+	if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
+		mutex_lock(&zone->lock);
+		ret = zloop_seq_write_prep(cmd);
+		if (!ret)
+			ret = zloop_do_rw(cmd);
+		mutex_unlock(&zone->lock);
+	} else {
+		ret = zloop_do_rw(cmd);
+	}
 out:
 	if (ret != -EIOCBQUEUED)
 		zloop_rw_complete(&cmd->iocb, ret);
