This source file includes the following definitions:
- to_loop_ctrl
- nvme_loop_queue_idx
- nvme_loop_complete_rq
- nvme_loop_tagset
- nvme_loop_queue_response
- nvme_loop_execute_work
- nvme_loop_queue_rq
- nvme_loop_submit_async_event
- nvme_loop_init_iod
- nvme_loop_init_request
- nvme_loop_init_hctx
- nvme_loop_init_admin_hctx
- nvme_loop_destroy_admin_queue
- nvme_loop_free_ctrl
- nvme_loop_destroy_io_queues
- nvme_loop_init_io_queues
- nvme_loop_connect_io_queues
- nvme_loop_configure_admin_queue
- nvme_loop_shutdown_ctrl
- nvme_loop_delete_ctrl_host
- nvme_loop_delete_ctrl
- nvme_loop_reset_ctrl_work
- nvme_loop_create_io_queues
- nvme_loop_find_port
- nvme_loop_create_ctrl
- nvme_loop_add_port
- nvme_loop_remove_port
- nvme_loop_init_module
- nvme_loop_cleanup_module
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

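/*
 * Per-request context, carved out of the blk-mq request PDU.  It holds the
 * host-side NVMe command and completion, the nvmet_req handed to the target
 * core, and the scatterlist used to pass the request's data without copying.
 */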
struct nvme_loop_iod {
        struct nvme_request nvme_req;
        struct nvme_command cmd;
        struct nvme_completion cqe;
        struct nvmet_req req;
        struct nvme_loop_queue *queue;
        struct work_struct work;
        struct sg_table sg_table;
        struct scatterlist first_sgl[];
};

struct nvme_loop_ctrl {
        struct nvme_loop_queue *queues;

        struct blk_mq_tag_set admin_tag_set;

        struct list_head list;
        struct blk_mq_tag_set tag_set;
        struct nvme_loop_iod async_event_iod;
        struct nvme_ctrl ctrl;

        struct nvmet_ctrl *target_ctrl;
        struct nvmet_port *port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
        NVME_LOOP_Q_LIVE = 0,
};

struct nvme_loop_queue {
        struct nvmet_cq nvme_cq;
        struct nvmet_sq nvme_sq;
        struct nvme_loop_ctrl *ctrl;
        unsigned long flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
        return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

        nvme_cleanup_cmd(req);
        sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
        nvme_complete_rq(req);
}

static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
        u32 queue_idx = nvme_loop_queue_idx(queue);

        if (queue_idx == 0)
                return queue->ctrl->admin_tag_set.tags[queue_idx];
        return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_loop_queue_response(struct nvmet_req *req)
{
        struct nvme_loop_queue *queue =
                container_of(req->sq, struct nvme_loop_queue, nvme_sq);
        struct nvme_completion *cqe = req->cqe;

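        /*
         * AEN completions are special: the async event command is submitted
         * with command_id NVME_AQ_BLK_MQ_DEPTH, which lies outside the admin
         * tag space, so it cannot be mapped back to a blk-mq request and is
         * handed to nvme_complete_async_event() instead.
         */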
        if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
                        cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
                                &cqe->result);
        } else {
                struct request *rq;

                rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
                if (!rq) {
                        dev_err(queue->ctrl->ctrl.device,
                                "tag 0x%x on queue %d not found\n",
                                cqe->command_id, nvme_loop_queue_idx(queue));
                        return;
                }

                nvme_end_request(rq, cqe->status, cqe->result);
        }
}

static void nvme_loop_execute_work(struct work_struct *work)
{
        struct nvme_loop_iod *iod =
                container_of(work, struct nvme_loop_iod, work);

        nvmet_req_execute(&iod->req);
}

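/*
 * queue_rq translates a host block-layer request into an nvmet_req: the NVMe
 * command is built in the per-request iod, the request's segments are mapped
 * into a chained scatterlist, and execution is deferred to a work item so the
 * target core runs the command in process context.  The completion comes back
 * through nvme_loop_queue_response() above.
 */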
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_loop_queue *queue = hctx->driver_data;
        struct request *req = bd->rq;
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
        bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
        blk_status_t ret;

        if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
                return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

        ret = nvme_setup_cmd(ns, req, &iod->cmd);
        if (ret)
                return ret;

        blk_mq_start_request(req);
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
        iod->req.port = queue->ctrl->port;
        if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvme_loop_ops))
                return BLK_STS_OK;

        if (blk_rq_nr_phys_segments(req)) {
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
                                iod->sg_table.sgl, SG_CHUNK_SIZE)) {
                        nvme_cleanup_cmd(req);
                        return BLK_STS_RESOURCE;
                }

                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
                iod->req.transfer_len = blk_rq_payload_bytes(req);
        }

        schedule_work(&iod->work);
        return BLK_STS_OK;
}

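/*
 * Async event commands bypass blk-mq: they use the pre-allocated
 * async_event_iod and the reserved command_id NVME_AQ_BLK_MQ_DEPTH, which is
 * how nvme_loop_queue_response() tells them apart from tagged admin commands.
 */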
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
        struct nvme_loop_queue *queue = &ctrl->queues[0];
        struct nvme_loop_iod *iod = &ctrl->async_event_iod;

        memset(&iod->cmd, 0, sizeof(iod->cmd));
        iod->cmd.common.opcode = nvme_admin_async_event;
        iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

        if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
                        &nvme_loop_ops)) {
                dev_err(ctrl->ctrl.device, "failed async event work\n");
                return;
        }

        schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
                struct nvme_loop_iod *iod, unsigned int queue_idx)
{
        iod->req.cmd = &iod->cmd;
        iod->req.cqe = &iod->cqe;
        iod->queue = &ctrl->queues[queue_idx];
        INIT_WORK(&iod->work, nvme_loop_execute_work);
        return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
                struct request *req, unsigned int hctx_idx,
                unsigned int numa_node)
{
        struct nvme_loop_ctrl *ctrl = set->driver_data;

        nvme_req(req)->ctrl = &ctrl->ctrl;
        return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
                        (set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

        BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

        hctx->driver_data = queue;
        return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                unsigned int hctx_idx)
{
        struct nvme_loop_ctrl *ctrl = data;
        struct nvme_loop_queue *queue = &ctrl->queues[0];

        BUG_ON(hctx_idx != 0);

        hctx->driver_data = queue;
        return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
        .queue_rq = nvme_loop_queue_rq,
        .complete = nvme_loop_complete_rq,
        .init_request = nvme_loop_init_request,
        .init_hctx = nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
        .queue_rq = nvme_loop_queue_rq,
        .complete = nvme_loop_complete_rq,
        .init_request = nvme_loop_init_request,
        .init_hctx = nvme_loop_init_admin_hctx,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        blk_cleanup_queue(ctrl->ctrl.admin_q);
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

        if (list_empty(&ctrl->list))
                goto free_ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_del(&ctrl->list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        if (nctrl->tagset) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                blk_mq_free_tag_set(&ctrl->tag_set);
        }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
free_ctrl:
        kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i;

        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
                nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
        }
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        unsigned int nr_io_queues;
        int ret, i;

        nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret || !nr_io_queues)
                return ret;

        dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

        for (i = 1; i <= nr_io_queues; i++) {
                ctrl->queues[i].ctrl = ctrl;
                ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
                if (ret)
                        goto out_destroy_queues;

                ctrl->ctrl.queue_count++;
        }

        return 0;

out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int i, ret;

        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
                ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
                if (ret)
                        return ret;
                set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
        }

        return 0;
}

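/*
 * Admin queue bring-up: initialise the target-side submission queue, allocate
 * the admin tag set plus the fabrics and admin request queues, connect the
 * admin queue, enable the controller and identify it.  The error labels
 * unwind this sequence in reverse order.
 */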
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
        int error;

        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2;
        ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
        ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

        ctrl->queues[0].ctrl = ctrl;
        error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
        if (error)
                return error;
        ctrl->ctrl.queue_count = 1;

        error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
        if (error)
                goto out_free_sq;
        ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

        ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.fabrics_q)) {
                error = PTR_ERR(ctrl->ctrl.fabrics_q);
                goto out_free_tagset;
        }

        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                error = PTR_ERR(ctrl->ctrl.admin_q);
                goto out_cleanup_fabrics_q;
        }

        error = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

        error = nvme_enable_ctrl(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        ctrl->ctrl.max_hw_sectors =
                (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

        error = nvme_init_identify(&ctrl->ctrl);
        if (error)
                goto out_cleanup_queue;

        return 0;

out_cleanup_queue:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
        nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
        return error;
}

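/*
 * Tear-down path shared by reset and delete: cancel any outstanding I/O and
 * admin requests, shut the controller down if it is still live, and destroy
 * the loop queues.
 */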
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
                blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
                nvme_loop_destroy_io_queues(ctrl);
        }

        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
        if (ctrl->ctrl.state == NVME_CTRL_LIVE)
                nvme_shutdown_ctrl(&ctrl->ctrl);

        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                        nvme_cancel_request, &ctrl->ctrl);
        blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
        nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
        nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
        struct nvme_loop_ctrl *ctrl;

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
                if (ctrl->ctrl.cntlid == nctrl->cntlid)
                        nvme_delete_ctrl(&ctrl->ctrl);
        }
        mutex_unlock(&nvme_loop_ctrl_mutex);
}

static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
        struct nvme_loop_ctrl *ctrl =
                container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
        bool changed;
        int ret;

        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);

        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
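                /* state change failure should never happen */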
                WARN_ON_ONCE(1);
                return;
        }

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_disable;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                goto out_destroy_admin;

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_destroy_io;

        blk_mq_update_nr_hw_queues(&ctrl->tag_set,
                        ctrl->ctrl.queue_count - 1);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        nvme_start_ctrl(&ctrl->ctrl);

        return;

out_destroy_io:
        nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
        nvme_loop_destroy_admin_queue(ctrl);
out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
        .name = "loop",
        .module = THIS_MODULE,
        .flags = NVME_F_FABRICS,
        .reg_read32 = nvmf_reg_read32,
        .reg_read64 = nvmf_reg_read64,
        .reg_write32 = nvmf_reg_write32,
        .free_ctrl = nvme_loop_free_ctrl,
        .submit_async_event = nvme_loop_submit_async_event,
        .delete_ctrl = nvme_loop_delete_ctrl_host,
        .get_address = nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
        int ret;

        ret = nvme_loop_init_io_queues(ctrl);
        if (ret)
                return ret;

        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1;
        ctrl->tag_set.numa_node = NUMA_NO_NODE;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                SG_CHUNK_SIZE * sizeof(struct scatterlist);
        ctrl->tag_set.driver_data = ctrl;
        ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
        ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
        ctrl->ctrl.tagset = &ctrl->tag_set;

        ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
        if (ret)
                goto out_destroy_queues;

        ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
        if (IS_ERR(ctrl->ctrl.connect_q)) {
                ret = PTR_ERR(ctrl->ctrl.connect_q);
                goto out_free_tagset;
        }

        ret = nvme_loop_connect_io_queues(ctrl);
        if (ret)
                goto out_cleanup_connect_q;

        return 0;

out_cleanup_connect_q:
        blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
        blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
        nvme_loop_destroy_io_queues(ctrl);
        return ret;
}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
        struct nvmet_port *p, *found = NULL;

        mutex_lock(&nvme_loop_ports_mutex);
        list_for_each_entry(p, &nvme_loop_ports, entry) {
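                /* if no transport address was given, use the first port */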
                if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
                    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
                        continue;
                found = p;
                break;
        }
        mutex_unlock(&nvme_loop_ports_mutex);
        return found;
}

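/*
 * Controller creation entry point, called by the fabrics layer for
 * "transport=loop": allocate the controller and its queue array, bring up the
 * admin queue, clamp queue_size to the controller's MAXCMD, create the I/O
 * queues and mark the controller LIVE before starting it.
 */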
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts)
{
        struct nvme_loop_ctrl *ctrl;
        bool changed;
        int ret;

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl)
                return ERR_PTR(-ENOMEM);
        ctrl->ctrl.opts = opts;
        INIT_LIST_HEAD(&ctrl->list);

        INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops, 0);
        if (ret)
                goto out_put_ctrl;

        ret = -ENOMEM;

        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
        ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

        ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
                        GFP_KERNEL);
        if (!ctrl->queues)
                goto out_uninit_ctrl;

        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_free_queues;

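        /* warn if the requested queue depth exceeds the controller's MAXCMD */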
        if (opts->queue_size > ctrl->ctrl.maxcmd) {
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
                opts->queue_size = ctrl->ctrl.maxcmd;
        }

        if (opts->nr_io_queues) {
                ret = nvme_loop_create_io_queues(ctrl);
                if (ret)
                        goto out_remove_admin_queue;
        }

        nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

        dev_info(ctrl->ctrl.device,
                 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

        nvme_get_ctrl(&ctrl->ctrl);

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        nvme_start_ctrl(&ctrl->ctrl);

        return &ctrl->ctrl;

out_remove_admin_queue:
        nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
        kfree(ctrl->queues);
out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
        mutex_lock(&nvme_loop_ports_mutex);
        list_add_tail(&port->entry, &nvme_loop_ports);
        mutex_unlock(&nvme_loop_ports_mutex);
        return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
        mutex_lock(&nvme_loop_ports_mutex);
        list_del_init(&port->entry);
        mutex_unlock(&nvme_loop_ports_mutex);

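        /*
         * The port is no longer discoverable by nvme_loop_find_port() at this
         * point; flush the delete workqueue so any controller still being torn
         * down has finished before the port itself goes away.
         */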
        flush_workqueue(nvme_delete_wq);
}

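/*
 * Target-side transport ops: the loop "transport" has no wire format, so only
 * port management, response queueing and controller deletion are needed.
 */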
static const struct nvmet_fabrics_ops nvme_loop_ops = {
        .owner = THIS_MODULE,
        .type = NVMF_TRTYPE_LOOP,
        .add_port = nvme_loop_add_port,
        .remove_port = nvme_loop_remove_port,
        .queue_response = nvme_loop_queue_response,
        .delete_ctrl = nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
        .name = "loop",
        .module = THIS_MODULE,
        .create_ctrl = nvme_loop_create_ctrl,
        .allowed_opts = NVMF_OPT_TRADDR,
};

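/*
 * Module init/exit register and unregister both halves of the loopback: the
 * nvmet fabrics ops on the target side and the "loop" nvmf transport on the
 * host side.  Cleanup also deletes any controllers that are still around.
 */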
static int __init nvme_loop_init_module(void)
{
        int ret;

        ret = nvmet_register_transport(&nvme_loop_ops);
        if (ret)
                return ret;

        ret = nvmf_register_transport(&nvme_loop_transport);
        if (ret)
                nvmet_unregister_transport(&nvme_loop_ops);

        return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
        struct nvme_loop_ctrl *ctrl, *next;

        nvmf_unregister_transport(&nvme_loop_transport);
        nvmet_unregister_transport(&nvme_loop_ops);

        mutex_lock(&nvme_loop_ctrl_mutex);
        list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
                nvme_delete_ctrl(&ctrl->ctrl);
        mutex_unlock(&nvme_loop_ctrl_mutex);

        flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254");