Skip to content

Commit 8f21b63

Browse files
sagigrimberg authored and gregkh committed
nvme-loop: handle cpu unplug when re-establishing the controller
[ Upstream commit 945dd5b ] If a cpu unplug event has occurred, we need to take the minimum of the provided nr_io_queues and the number of online cpus, otherwise we won't be able to connect them as blk-mq mapping won't dispatch to those queues. Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Sagi Grimberg <sagi@grimberg.me> Signed-off-by: Sasha Levin <alexander.levin@verizon.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent c9bbd27 commit 8f21b63

1 file changed

Lines changed: 50 additions & 38 deletions

File tree

drivers/nvme/target/loop.c

Lines changed: 50 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
223223
static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
224224
struct nvme_loop_iod *iod, unsigned int queue_idx)
225225
{
226-
BUG_ON(queue_idx >= ctrl->queue_count);
227-
228226
iod->req.cmd = &iod->cmd;
229227
iod->req.rsp = &iod->rsp;
230228
iod->queue = &ctrl->queues[queue_idx];
@@ -314,6 +312,43 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
314312
kfree(ctrl);
315313
}
316314

315+
static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
316+
{
317+
int i;
318+
319+
for (i = 1; i < ctrl->queue_count; i++)
320+
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
321+
}
322+
323+
/*
 * Negotiate the I/O queue count with the controller and initialize the
 * corresponding nvmet submission queues.
 *
 * Clamps the requested queue count to the number of online CPUs: after a
 * CPU unplug event, blk-mq will not dispatch to queues mapped to offline
 * CPUs, so connecting them would fail (this is the fix this commit makes).
 *
 * Returns 0 on success (including the nr_io_queues == 0 case) or a
 * negative error code; on failure all queues initialized so far are
 * destroyed.
 */
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	/* Do not ask for more queues than there are online CPUs. */
	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	/* nvme_set_queue_count may further reduce nr_io_queues. */
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	/* Index 0 is the admin queue; I/O queues start at 1. */
	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		/*
		 * Count each queue only after successful init so that
		 * nvme_loop_destroy_io_queues() tears down exactly the
		 * queues that were brought up.
		 */
		ctrl->queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}
351+
317352
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
318353
{
319354
int error;
@@ -385,17 +420,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
385420

386421
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
387422
{
388-
int i;
389-
390423
nvme_stop_keep_alive(&ctrl->ctrl);
391424

392425
if (ctrl->queue_count > 1) {
393426
nvme_stop_queues(&ctrl->ctrl);
394427
blk_mq_tagset_busy_iter(&ctrl->tag_set,
395428
nvme_cancel_request, &ctrl->ctrl);
396-
397-
for (i = 1; i < ctrl->queue_count; i++)
398-
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
429+
nvme_loop_destroy_io_queues(ctrl);
399430
}
400431

401432
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
@@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
467498
if (ret)
468499
goto out_disable;
469500

470-
for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
471-
ctrl->queues[i].ctrl = ctrl;
472-
ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
473-
if (ret)
474-
goto out_free_queues;
475-
476-
ctrl->queue_count++;
477-
}
501+
ret = nvme_loop_init_io_queues(ctrl);
502+
if (ret)
503+
goto out_destroy_admin;
478504

479-
for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) {
505+
for (i = 1; i < ctrl->queue_count; i++) {
480506
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
481507
if (ret)
482-
goto out_free_queues;
508+
goto out_destroy_io;
483509
}
484510

485511
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
492518

493519
return;
494520

495-
out_free_queues:
496-
for (i = 1; i < ctrl->queue_count; i++)
497-
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
521+
out_destroy_io:
522+
nvme_loop_destroy_io_queues(ctrl);
523+
out_destroy_admin:
498524
nvme_loop_destroy_admin_queue(ctrl);
499525
out_disable:
500526
dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
533559

534560
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
535561
{
536-
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
537562
int ret, i;
538563

539-
ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
540-
if (ret || !opts->nr_io_queues)
564+
ret = nvme_loop_init_io_queues(ctrl);
565+
if (ret)
541566
return ret;
542567

543-
dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
544-
opts->nr_io_queues);
545-
546-
for (i = 1; i <= opts->nr_io_queues; i++) {
547-
ctrl->queues[i].ctrl = ctrl;
548-
ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
549-
if (ret)
550-
goto out_destroy_queues;
551-
552-
ctrl->queue_count++;
553-
}
554-
555568
memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
556569
ctrl->tag_set.ops = &nvme_loop_mq_ops;
557570
ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
@@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
575588
goto out_free_tagset;
576589
}
577590

578-
for (i = 1; i <= opts->nr_io_queues; i++) {
591+
for (i = 1; i < ctrl->queue_count; i++) {
579592
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
580593
if (ret)
581594
goto out_cleanup_connect_q;
@@ -588,8 +601,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
588601
out_free_tagset:
589602
blk_mq_free_tag_set(&ctrl->tag_set);
590603
out_destroy_queues:
591-
for (i = 1; i < ctrl->queue_count; i++)
592-
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
604+
nvme_loop_destroy_io_queues(ctrl);
593605
return ret;
594606
}
595607

0 commit comments

Comments
 (0)