This source file includes the following definitions.
- errno_to_nvme_status
- nvmet_copy_to_sgl
- nvmet_copy_from_sgl
- nvmet_zero_sgl
- nvmet_max_nsid
- nvmet_async_event_result
- nvmet_async_events_free
- nvmet_async_event_work
- nvmet_add_async_event
- nvmet_add_to_changed_ns_log
- nvmet_ns_changed
- nvmet_send_ana_event
- nvmet_port_send_ana_event
- nvmet_register_transport
- nvmet_unregister_transport
- nvmet_port_del_ctrls
- nvmet_enable_port
- nvmet_disable_port
- nvmet_keep_alive_timer
- nvmet_start_keep_alive_timer
- nvmet_stop_keep_alive_timer
- __nvmet_find_namespace
- nvmet_find_namespace
- nvmet_destroy_namespace
- nvmet_put_namespace
- nvmet_ns_dev_disable
- nvmet_p2pmem_ns_enable
- nvmet_p2pmem_ns_add_p2p
- nvmet_ns_enable
- nvmet_ns_disable
- nvmet_ns_free
- nvmet_ns_alloc
- nvmet_update_sq_head
- nvmet_set_error
- __nvmet_req_complete
- nvmet_req_complete
- nvmet_cq_setup
- nvmet_sq_setup
- nvmet_confirm_sq
- nvmet_sq_destroy
- nvmet_sq_free
- nvmet_sq_init
- nvmet_check_ana_state
- nvmet_io_cmd_check_access
- nvmet_parse_io_cmd
- nvmet_req_init
- nvmet_req_uninit
- nvmet_req_execute
- nvmet_req_alloc_sgl
- nvmet_req_free_sgl
- nvmet_cc_en
- nvmet_cc_css
- nvmet_cc_mps
- nvmet_cc_ams
- nvmet_cc_shn
- nvmet_cc_iosqes
- nvmet_cc_iocqes
- nvmet_start_ctrl
- nvmet_clear_ctrl
- nvmet_update_cc
- nvmet_init_cap
- nvmet_ctrl_find_get
- nvmet_check_ctrl_status
- nvmet_host_allowed
- nvmet_setup_p2p_ns_map
- nvmet_release_p2p_ns_map
- nvmet_fatal_error_handler
- nvmet_alloc_ctrl
- nvmet_ctrl_free
- nvmet_ctrl_put
- nvmet_ctrl_fatal_error
- nvmet_find_get_subsys
- nvmet_subsys_alloc
- nvmet_subsys_free
- nvmet_subsys_del_ctrls
- nvmet_subsys_put
- nvmet_init
- nvmet_exit
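
To make the entry points listed above concrete, here is a minimal, illustrative sketch (not part of this file) of how a fabrics transport driver plugs into this core, modeled on the in-tree loop transport. All demo_* names are hypothetical and error handling is omitted.

#include <linux/module.h>
#include "nvmet.h"

static int demo_add_port(struct nvmet_port *port)
{
	/* start listening for connections on this port */
	return 0;
}

static void demo_remove_port(struct nvmet_port *port)
{
	/* stop listening and tear down port resources */
}

static void demo_queue_response(struct nvmet_req *req)
{
	/* transmit req->cqe back to the host over the fabric */
}

static void demo_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	/* disconnect and free all queues of this controller */
}

static const struct nvmet_fabrics_ops demo_nvmet_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= demo_add_port,
	.remove_port	= demo_remove_port,
	.queue_response	= demo_queue_response,
	.delete_ctrl	= demo_delete_ctrl,
};

static int __init demo_init(void)
{
	return nvmet_register_transport(&demo_nvmet_ops);
}

static void __exit demo_exit(void)
{
	nvmet_unregister_transport(&demo_nvmet_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");

The source listing follows.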
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Common code for the NVMe target.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/random.h>
9 #include <linux/rculist.h>
10 #include <linux/pci-p2pdma.h>
11 #include <linux/scatterlist.h>
12
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15
16 #include "nvmet.h"
17
18 struct workqueue_struct *buffered_io_wq;
19 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
20 static DEFINE_IDA(cntlid_ida);
21
22 /*
23 * This read/write semaphore is used to synchronize access to configuration
24 * information on a target system that will result in discovery log page
25 * information change for at least one host.
26 * The full list of resources protected by this semaphore is:
27 *
28 *  - subsystems list
29 *  - per-subsystem allowed hosts list
30 *  - allow_any_host subsystem attribute
31 *  - nvmet_genctr
32 *  - the nvmet_transports array
33 *
34 * When updating any of those lists/structures write lock should be obtained,
35 * while when reading (populating discovery log page or checking host-subsystem
36 * link) read lock is obtained.
37 */
38 DECLARE_RWSEM(nvmet_config_sem);
39
40 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
41 u64 nvmet_ana_chgcnt;
42 DECLARE_RWSEM(nvmet_ana_sem);
43
44 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
45 {
46 u16 status;
47
48 switch (errno) {
49 case 0:
50 status = NVME_SC_SUCCESS;
51 break;
52 case -ENOSPC:
53 req->error_loc = offsetof(struct nvme_rw_command, length);
54 status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
55 break;
56 case -EREMOTEIO:
57 req->error_loc = offsetof(struct nvme_rw_command, slba);
58 status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
59 break;
60 case -EOPNOTSUPP:
61 req->error_loc = offsetof(struct nvme_common_command, opcode);
62 switch (req->cmd->common.opcode) {
63 case nvme_cmd_dsm:
64 case nvme_cmd_write_zeroes:
65 status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
66 break;
67 default:
68 status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
69 }
70 break;
71 case -ENODATA:
72 req->error_loc = offsetof(struct nvme_rw_command, nsid);
73 status = NVME_SC_ACCESS_DENIED;
74 break;
75 case -EIO:
76 /* FALLTHRU */
77 default:
78 req->error_loc = offsetof(struct nvme_common_command, opcode);
79 status = NVME_SC_INTERNAL | NVME_SC_DNR;
80 }
81
82 return status;
83 }
84
85 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
86 const char *subsysnqn);
87
88 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
89 size_t len)
90 {
91 if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
92 req->error_loc = offsetof(struct nvme_common_command, dptr);
93 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
94 }
95 return 0;
96 }
97
98 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
99 {
100 if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
101 req->error_loc = offsetof(struct nvme_common_command, dptr);
102 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
103 }
104 return 0;
105 }
106
107 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
108 {
109 if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
110 req->error_loc = offsetof(struct nvme_common_command, dptr);
111 return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
112 }
113 return 0;
114 }
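/*
 * Typical use of the SGL helpers above (sketch, as in the admin/log handlers
 * elsewhere in the target code): build a kernel buffer, copy it out, complete:
 *
 *	status = nvmet_copy_to_sgl(req, 0, buf, len);
 *	nvmet_req_complete(req, status);
 */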
115
116 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
117 {
118 struct nvmet_ns *ns;
119
120 if (list_empty(&subsys->namespaces))
121 return 0;
122
123 ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
124 return ns->nsid;
125 }
126
127 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
128 {
129 return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
130 }
131
132 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
133 {
134 struct nvmet_req *req;
135
136 while (1) {
137 mutex_lock(&ctrl->lock);
138 if (!ctrl->nr_async_event_cmds) {
139 mutex_unlock(&ctrl->lock);
140 return;
141 }
142
143 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
144 mutex_unlock(&ctrl->lock);
145 nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
146 }
147 }
148
149 static void nvmet_async_event_work(struct work_struct *work)
150 {
151 struct nvmet_ctrl *ctrl =
152 container_of(work, struct nvmet_ctrl, async_event_work);
153 struct nvmet_async_event *aen;
154 struct nvmet_req *req;
155
156 while (1) {
157 mutex_lock(&ctrl->lock);
158 aen = list_first_entry_or_null(&ctrl->async_events,
159 struct nvmet_async_event, entry);
160 if (!aen || !ctrl->nr_async_event_cmds) {
161 mutex_unlock(&ctrl->lock);
162 return;
163 }
164
165 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
166 nvmet_set_result(req, nvmet_async_event_result(aen));
167
168 list_del(&aen->entry);
169 kfree(aen);
170
171 mutex_unlock(&ctrl->lock);
172 nvmet_req_complete(req, 0);
173 }
174 }
175
176 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
177 u8 event_info, u8 log_page)
178 {
179 struct nvmet_async_event *aen;
180
181 aen = kmalloc(sizeof(*aen), GFP_KERNEL);
182 if (!aen)
183 return;
184
185 aen->event_type = event_type;
186 aen->event_info = event_info;
187 aen->log_page = log_page;
188
189 mutex_lock(&ctrl->lock);
190 list_add_tail(&aen->entry, &ctrl->async_events);
191 mutex_unlock(&ctrl->lock);
192
193 schedule_work(&ctrl->async_event_work);
194 }
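/*
 * Note on the AEN flow: nvmet_add_async_event() only queues the event on
 * ctrl->async_events. nvmet_async_event_work() later pairs queued events with
 * outstanding AER commands (ctrl->async_event_cmds) and completes them, so an
 * event posted while no AER is outstanding is delivered when the host sends
 * its next Asynchronous Event Request.
 */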
195
196 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
197 {
198 u32 i;
199
200 mutex_lock(&ctrl->lock);
201 if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
202 goto out_unlock;
203
204 for (i = 0; i < ctrl->nr_changed_ns; i++) {
205 if (ctrl->changed_ns_list[i] == nsid)
206 goto out_unlock;
207 }
208
209 if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
210 ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
211 ctrl->nr_changed_ns = U32_MAX;
212 goto out_unlock;
213 }
214
215 ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
216 out_unlock:
217 mutex_unlock(&ctrl->lock);
218 }
219
220 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
221 {
222 struct nvmet_ctrl *ctrl;
223
224 lockdep_assert_held(&subsys->lock);
225
226 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
227 nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
228 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
229 continue;
230 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
231 NVME_AER_NOTICE_NS_CHANGED,
232 NVME_LOG_CHANGED_NS);
233 }
234 }
235
236 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
237 struct nvmet_port *port)
238 {
239 struct nvmet_ctrl *ctrl;
240
241 mutex_lock(&subsys->lock);
242 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
243 if (port && ctrl->port != port)
244 continue;
245 if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
246 continue;
247 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
248 NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
249 }
250 mutex_unlock(&subsys->lock);
251 }
252
253 void nvmet_port_send_ana_event(struct nvmet_port *port)
254 {
255 struct nvmet_subsys_link *p;
256
257 down_read(&nvmet_config_sem);
258 list_for_each_entry(p, &port->subsystems, entry)
259 nvmet_send_ana_event(p->subsys, port);
260 up_read(&nvmet_config_sem);
261 }
262
263 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
264 {
265 int ret = 0;
266
267 down_write(&nvmet_config_sem);
268 if (nvmet_transports[ops->type])
269 ret = -EINVAL;
270 else
271 nvmet_transports[ops->type] = ops;
272 up_write(&nvmet_config_sem);
273
274 return ret;
275 }
276 EXPORT_SYMBOL_GPL(nvmet_register_transport);
277
278 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
279 {
280 down_write(&nvmet_config_sem);
281 nvmet_transports[ops->type] = NULL;
282 up_write(&nvmet_config_sem);
283 }
284 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
285
286 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
287 {
288 struct nvmet_ctrl *ctrl;
289
290 mutex_lock(&subsys->lock);
291 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
292 if (ctrl->port == port)
293 ctrl->ops->delete_ctrl(ctrl);
294 }
295 mutex_unlock(&subsys->lock);
296 }
297
298 int nvmet_enable_port(struct nvmet_port *port)
299 {
300 const struct nvmet_fabrics_ops *ops;
301 int ret;
302
303 lockdep_assert_held(&nvmet_config_sem);
304
305 ops = nvmet_transports[port->disc_addr.trtype];
306 if (!ops) {
307 up_write(&nvmet_config_sem);
308 request_module("nvmet-transport-%d", port->disc_addr.trtype);
309 down_write(&nvmet_config_sem);
310 ops = nvmet_transports[port->disc_addr.trtype];
311 if (!ops) {
312 pr_err("transport type %d not supported\n",
313 port->disc_addr.trtype);
314 return -EINVAL;
315 }
316 }
317
318 if (!try_module_get(ops->owner))
319 return -EINVAL;
320
321 ret = ops->add_port(port);
322 if (ret) {
323 module_put(ops->owner);
324 return ret;
325 }
326
327 /* If the transport didn't set inline_data_size, then disable it. */
328 if (port->inline_data_size < 0)
329 port->inline_data_size = 0;
330
331 port->enabled = true;
332 port->tr_ops = ops;
333 return 0;
334 }
335
336 void nvmet_disable_port(struct nvmet_port *port)
337 {
338 const struct nvmet_fabrics_ops *ops;
339
340 lockdep_assert_held(&nvmet_config_sem);
341
342 port->enabled = false;
343 port->tr_ops = NULL;
344
345 ops = nvmet_transports[port->disc_addr.trtype];
346 ops->remove_port(port);
347 module_put(ops->owner);
348 }
349
350 static void nvmet_keep_alive_timer(struct work_struct *work)
351 {
352 struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
353 struct nvmet_ctrl, ka_work);
354 bool cmd_seen = ctrl->cmd_seen;
355
356 ctrl->cmd_seen = false;
357 if (cmd_seen) {
358 pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
359 ctrl->cntlid);
360 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
361 return;
362 }
363
364 pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
365 ctrl->cntlid, ctrl->kato);
366
367 nvmet_ctrl_fatal_error(ctrl);
368 }
369
370 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
371 {
372 pr_debug("ctrl %d start keep-alive timer for %d secs\n",
373 ctrl->cntlid, ctrl->kato);
374
375 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
376 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
377 }
378
379 static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
380 {
381 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
382
383 cancel_delayed_work_sync(&ctrl->ka_work);
384 }
385
386 static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
387 __le32 nsid)
388 {
389 struct nvmet_ns *ns;
390
391 list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
392 if (ns->nsid == le32_to_cpu(nsid))
393 return ns;
394 }
395
396 return NULL;
397 }
398
399 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
400 {
401 struct nvmet_ns *ns;
402
403 rcu_read_lock();
404 ns = __nvmet_find_namespace(ctrl, nsid);
405 if (ns)
406 percpu_ref_get(&ns->ref);
407 rcu_read_unlock();
408
409 return ns;
410 }
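/*
 * nvmet_find_namespace() returns with a percpu reference held on the
 * namespace; the caller must drop it with nvmet_put_namespace(). For I/O
 * commands this happens automatically in __nvmet_req_complete().
 */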
411
412 static void nvmet_destroy_namespace(struct percpu_ref *ref)
413 {
414 struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
415
416 complete(&ns->disable_done);
417 }
418
419 void nvmet_put_namespace(struct nvmet_ns *ns)
420 {
421 percpu_ref_put(&ns->ref);
422 }
423
424 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
425 {
426 nvmet_bdev_ns_disable(ns);
427 nvmet_file_ns_disable(ns);
428 }
429
430 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
431 {
432 int ret;
433 struct pci_dev *p2p_dev;
434
435 if (!ns->use_p2pmem)
436 return 0;
437
438 if (!ns->bdev) {
439 pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
440 return -EINVAL;
441 }
442
443 if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
444 pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
445 ns->device_path);
446 return -EINVAL;
447 }
448
449 if (ns->p2p_dev) {
450 ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
451 if (ret < 0)
452 return -EINVAL;
453 } else {
454 /*
455 * Right now we just check that there is p2pmem available so
456 * we can report an error to the user right away if there
457 * is not. We'll find the actual device to use once we
458 * set up the controller when the port's device is available.
459 */
460
461 p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
462 if (!p2p_dev) {
463 pr_err("no peer-to-peer memory is available for %s\n",
464 ns->device_path);
465 return -EINVAL;
466 }
467
468 pci_dev_put(p2p_dev);
469 }
470
471 return 0;
472 }
473
474 /*
475 * Note: ctrl->subsys->lock should be held when calling this function
476 */
477 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
478 struct nvmet_ns *ns)
479 {
480 struct device *clients[2];
481 struct pci_dev *p2p_dev;
482 int ret;
483
484 if (!ctrl->p2p_client || !ns->use_p2pmem)
485 return;
486
487 if (ns->p2p_dev) {
488 ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
489 if (ret < 0)
490 return;
491
492 p2p_dev = pci_dev_get(ns->p2p_dev);
493 } else {
494 clients[0] = ctrl->p2p_client;
495 clients[1] = nvmet_ns_dev(ns);
496
497 p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
498 if (!p2p_dev) {
499 pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
500 dev_name(ctrl->p2p_client), ns->device_path);
501 return;
502 }
503 }
504
505 ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
506 if (ret < 0)
507 pci_dev_put(p2p_dev);
508
509 pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
510 ns->nsid);
511 }
512
513 int nvmet_ns_enable(struct nvmet_ns *ns)
514 {
515 struct nvmet_subsys *subsys = ns->subsys;
516 struct nvmet_ctrl *ctrl;
517 int ret;
518
519 mutex_lock(&subsys->lock);
520 ret = 0;
521 if (ns->enabled)
522 goto out_unlock;
523
524 ret = -EMFILE;
525 if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
526 goto out_unlock;
527
528 ret = nvmet_bdev_ns_enable(ns);
529 if (ret == -ENOTBLK)
530 ret = nvmet_file_ns_enable(ns);
531 if (ret)
532 goto out_unlock;
533
534 ret = nvmet_p2pmem_ns_enable(ns);
535 if (ret)
536 goto out_dev_disable;
537
538 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
539 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
540
541 ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
542 0, GFP_KERNEL);
543 if (ret)
544 goto out_dev_put;
545
546 if (ns->nsid > subsys->max_nsid)
547 subsys->max_nsid = ns->nsid;
548
549 /*
550 * The namespaces list needs to be sorted to simplify the implementation
551 * of the Identify Namespace List subcommand.
552 */
553 if (list_empty(&subsys->namespaces)) {
554 list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
555 } else {
556 struct nvmet_ns *old;
557
558 list_for_each_entry_rcu(old, &subsys->namespaces, dev_link,
559 lockdep_is_held(&subsys->lock)) {
560 BUG_ON(ns->nsid == old->nsid);
561 if (ns->nsid < old->nsid)
562 break;
563 }
564
565 list_add_tail_rcu(&ns->dev_link, &old->dev_link);
566 }
567 subsys->nr_namespaces++;
568
569 nvmet_ns_changed(subsys, ns->nsid);
570 ns->enabled = true;
571 ret = 0;
572 out_unlock:
573 mutex_unlock(&subsys->lock);
574 return ret;
575 out_dev_put:
576 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
577 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
578 out_dev_disable:
579 nvmet_ns_dev_disable(ns);
580 goto out_unlock;
581 }
582
583 void nvmet_ns_disable(struct nvmet_ns *ns)
584 {
585 struct nvmet_subsys *subsys = ns->subsys;
586 struct nvmet_ctrl *ctrl;
587
588 mutex_lock(&subsys->lock);
589 if (!ns->enabled)
590 goto out_unlock;
591
592 ns->enabled = false;
593 list_del_rcu(&ns->dev_link);
594 if (ns->nsid == subsys->max_nsid)
595 subsys->max_nsid = nvmet_max_nsid(subsys);
596
597 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
598 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
599
600 mutex_unlock(&subsys->lock);
601
602 /*
603 * Now that we removed the namespace from the lookup list, we
604 * can kill the percpu ref and wait for any remaining references
605 * to be dropped, as well as an RCU grace period for anyone only
606 * using the namespace under rcu_read_lock(). Note: we can't simply
607 * use call_rcu here as we need to ensure the namespace has been
608 * fully destroyed before unloading the module.
609 */
610 percpu_ref_kill(&ns->ref);
611 synchronize_rcu();
612 wait_for_completion(&ns->disable_done);
613 percpu_ref_exit(&ns->ref);
614
615 mutex_lock(&subsys->lock);
616
617 subsys->nr_namespaces--;
618 nvmet_ns_changed(subsys, ns->nsid);
619 nvmet_ns_dev_disable(ns);
620 out_unlock:
621 mutex_unlock(&subsys->lock);
622 }
623
624 void nvmet_ns_free(struct nvmet_ns *ns)
625 {
626 nvmet_ns_disable(ns);
627
628 down_write(&nvmet_ana_sem);
629 nvmet_ana_group_enabled[ns->anagrpid]--;
630 up_write(&nvmet_ana_sem);
631
632 kfree(ns->device_path);
633 kfree(ns);
634 }
635
636 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
637 {
638 struct nvmet_ns *ns;
639
640 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
641 if (!ns)
642 return NULL;
643
644 INIT_LIST_HEAD(&ns->dev_link);
645 init_completion(&ns->disable_done);
646
647 ns->nsid = nsid;
648 ns->subsys = subsys;
649
650 down_write(&nvmet_ana_sem);
651 ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
652 nvmet_ana_group_enabled[ns->anagrpid]++;
653 up_write(&nvmet_ana_sem);
654
655 uuid_gen(&ns->uuid);
656 ns->buffered_io = false;
657
658 return ns;
659 }
660
661 static void nvmet_update_sq_head(struct nvmet_req *req)
662 {
663 if (req->sq->size) {
664 u32 old_sqhd, new_sqhd;
665
666 do {
667 old_sqhd = req->sq->sqhd;
668 new_sqhd = (old_sqhd + 1) % req->sq->size;
669 } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
670 old_sqhd);
671 }
672 req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
673 }
674
675 static void nvmet_set_error(struct nvmet_req *req, u16 status)
676 {
677 struct nvmet_ctrl *ctrl = req->sq->ctrl;
678 struct nvme_error_slot *new_error_slot;
679 unsigned long flags;
680
681 req->cqe->status = cpu_to_le16(status << 1);
682
683 if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
684 return;
685
686 spin_lock_irqsave(&ctrl->error_lock, flags);
687 ctrl->err_counter++;
688 new_error_slot =
689 &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
690
691 new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
692 new_error_slot->sqid = cpu_to_le16(req->sq->qid);
693 new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
694 new_error_slot->status_field = cpu_to_le16(status << 1);
695 new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
696 new_error_slot->lba = cpu_to_le64(req->error_slba);
697 new_error_slot->nsid = req->cmd->common.nsid;
698 spin_unlock_irqrestore(&ctrl->error_lock, flags);
699
700 /* set the more bit for this request */
701 req->cqe->status |= cpu_to_le16(1 << 14);
702 }
703
704 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
705 {
706 if (!req->sq->sqhd_disabled)
707 nvmet_update_sq_head(req);
708 req->cqe->sq_id = cpu_to_le16(req->sq->qid);
709 req->cqe->command_id = req->cmd->common.command_id;
710
711 if (unlikely(status))
712 nvmet_set_error(req, status);
713
714 trace_nvmet_req_complete(req);
715
716 if (req->ns)
717 nvmet_put_namespace(req->ns);
718 req->ops->queue_response(req);
719 }
720
721 void nvmet_req_complete(struct nvmet_req *req, u16 status)
722 {
723 __nvmet_req_complete(req, status);
724 percpu_ref_put(&req->sq->ref);
725 }
726 EXPORT_SYMBOL_GPL(nvmet_req_complete);
727
728 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
729 u16 qid, u16 size)
730 {
731 cq->qid = qid;
732 cq->size = size;
733
734 ctrl->cqs[qid] = cq;
735 }
736
737 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
738 u16 qid, u16 size)
739 {
740 sq->sqhd = 0;
741 sq->qid = qid;
742 sq->size = size;
743
744 ctrl->sqs[qid] = sq;
745 }
746
747 static void nvmet_confirm_sq(struct percpu_ref *ref)
748 {
749 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
750
751 complete(&sq->confirm_done);
752 }
753
754 void nvmet_sq_destroy(struct nvmet_sq *sq)
755 {
756 /*
757 * If this is the admin queue, complete all AERs so that our
758 * queue doesn't have outstanding requests on it.
759 */
760 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
761 nvmet_async_events_free(sq->ctrl);
762 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
763 wait_for_completion(&sq->confirm_done);
764 wait_for_completion(&sq->free_done);
765 percpu_ref_exit(&sq->ref);
766
767 if (sq->ctrl) {
768 nvmet_ctrl_put(sq->ctrl);
769 sq->ctrl = NULL;
770 }
771 }
772 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
773
774 static void nvmet_sq_free(struct percpu_ref *ref)
775 {
776 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
777
778 complete(&sq->free_done);
779 }
780
781 int nvmet_sq_init(struct nvmet_sq *sq)
782 {
783 int ret;
784
785 ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
786 if (ret) {
787 pr_err("percpu_ref init failed!\n");
788 return ret;
789 }
790 init_completion(&sq->free_done);
791 init_completion(&sq->confirm_done);
792
793 return 0;
794 }
795 EXPORT_SYMBOL_GPL(nvmet_sq_init);
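/*
 * Submission queue lifetime: every request takes a reference on sq->ref in
 * nvmet_req_init() (percpu_ref_tryget_live) and drops it in
 * nvmet_req_complete() or nvmet_req_uninit(). nvmet_sq_destroy() kills the
 * reference and waits for both confirm_done and free_done, so it only returns
 * once all in-flight requests on the queue have completed.
 */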
796
797 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
798 struct nvmet_ns *ns)
799 {
800 enum nvme_ana_state state = port->ana_state[ns->anagrpid];
801
802 if (unlikely(state == NVME_ANA_INACCESSIBLE))
803 return NVME_SC_ANA_INACCESSIBLE;
804 if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
805 return NVME_SC_ANA_PERSISTENT_LOSS;
806 if (unlikely(state == NVME_ANA_CHANGE))
807 return NVME_SC_ANA_TRANSITION;
808 return 0;
809 }
810
811 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
812 {
813 if (unlikely(req->ns->readonly)) {
814 switch (req->cmd->common.opcode) {
815 case nvme_cmd_read:
816 case nvme_cmd_flush:
817 break;
818 default:
819 return NVME_SC_NS_WRITE_PROTECTED;
820 }
821 }
822
823 return 0;
824 }
825
826 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
827 {
828 struct nvme_command *cmd = req->cmd;
829 u16 ret;
830
831 ret = nvmet_check_ctrl_status(req, cmd);
832 if (unlikely(ret))
833 return ret;
834
835 req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
836 if (unlikely(!req->ns)) {
837 req->error_loc = offsetof(struct nvme_common_command, nsid);
838 return NVME_SC_INVALID_NS | NVME_SC_DNR;
839 }
840 ret = nvmet_check_ana_state(req->port, req->ns);
841 if (unlikely(ret)) {
842 req->error_loc = offsetof(struct nvme_common_command, nsid);
843 return ret;
844 }
845 ret = nvmet_io_cmd_check_access(req);
846 if (unlikely(ret)) {
847 req->error_loc = offsetof(struct nvme_common_command, nsid);
848 return ret;
849 }
850
851 if (req->ns->file)
852 return nvmet_file_parse_io_cmd(req);
853 else
854 return nvmet_bdev_parse_io_cmd(req);
855 }
856
857 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
858 struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
859 {
860 u8 flags = req->cmd->common.flags;
861 u16 status;
862
863 req->cq = cq;
864 req->sq = sq;
865 req->ops = ops;
866 req->sg = NULL;
867 req->sg_cnt = 0;
868 req->transfer_len = 0;
869 req->cqe->status = 0;
870 req->cqe->sq_head = 0;
871 req->ns = NULL;
872 req->error_loc = NVMET_NO_ERROR_LOC;
873 req->error_slba = 0;
874
875 trace_nvmet_req_init(req, req->cmd);
876
877 /* no support for fused commands yet */
878 if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
879 req->error_loc = offsetof(struct nvme_common_command, flags);
880 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
881 goto fail;
882 }
883
884 /*
885 * For fabrics, the PSDT field shall describe a metadata pointer (MPTR) that
886 * contains the address of a single contiguous physical buffer that is
887 * byte aligned.
888 */
889 if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
890 req->error_loc = offsetof(struct nvme_common_command, flags);
891 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
892 goto fail;
893 }
894
895 if (unlikely(!req->sq->ctrl))
896 /* will return an error for any non-connect command: */
897 status = nvmet_parse_connect_cmd(req);
898 else if (likely(req->sq->qid != 0))
899 status = nvmet_parse_io_cmd(req);
900 else if (nvme_is_fabrics(req->cmd))
901 status = nvmet_parse_fabrics_cmd(req);
902 else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
903 status = nvmet_parse_discovery_cmd(req);
904 else
905 status = nvmet_parse_admin_cmd(req);
906
907 if (status)
908 goto fail;
909
910 if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
911 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
912 goto fail;
913 }
914
915 if (sq->ctrl)
916 sq->ctrl->cmd_seen = true;
917
918 return true;
919
920 fail:
921 __nvmet_req_complete(req, status);
922 return false;
923 }
924 EXPORT_SYMBOL_GPL(nvmet_req_init);
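/*
 * Request lifecycle from a transport's point of view (sketch): once
 * nvmet_req_init() succeeds, the driver sets req->transfer_len, optionally
 * allocates req->sg with nvmet_req_alloc_sgl(), performs any data transfer,
 * and then calls nvmet_req_execute(). The backend eventually calls
 * nvmet_req_complete(), which sends the CQE back via ops->queue_response().
 */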
925
926 void nvmet_req_uninit(struct nvmet_req *req)
927 {
928 percpu_ref_put(&req->sq->ref);
929 if (req->ns)
930 nvmet_put_namespace(req->ns);
931 }
932 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
933
934 void nvmet_req_execute(struct nvmet_req *req)
935 {
936 if (unlikely(req->data_len != req->transfer_len)) {
937 req->error_loc = offsetof(struct nvme_common_command, dptr);
938 nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
939 } else
940 req->execute(req);
941 }
942 EXPORT_SYMBOL_GPL(nvmet_req_execute);
943
944 int nvmet_req_alloc_sgl(struct nvmet_req *req)
945 {
946 struct pci_dev *p2p_dev = NULL;
947
948 if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
949 if (req->sq->ctrl && req->ns)
950 p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
951 req->ns->nsid);
952
953 req->p2p_dev = NULL;
954 if (req->sq->qid && p2p_dev) {
955 req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
956 req->transfer_len);
957 if (req->sg) {
958 req->p2p_dev = p2p_dev;
959 return 0;
960 }
961 }
962
963 /*
964 * If no P2P memory was available we fall back to using
965 * regular memory.
966 */
967 }
968
969 req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
970 if (!req->sg)
971 return -ENOMEM;
972
973 return 0;
974 }
975 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
976
977 void nvmet_req_free_sgl(struct nvmet_req *req)
978 {
979 if (req->p2p_dev)
980 pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
981 else
982 sgl_free(req->sg);
983
984 req->sg = NULL;
985 req->sg_cnt = 0;
986 }
987 EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
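/*
 * Note: for I/O queues, nvmet_req_alloc_sgl() prefers peer-to-peer memory from
 * the device mapped in ctrl->p2p_ns_map at controller setup; if none is
 * available it falls back to ordinary sgl_alloc() memory, and
 * nvmet_req_free_sgl() releases whichever kind was used.
 */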
988
989 static inline bool nvmet_cc_en(u32 cc)
990 {
991 return (cc >> NVME_CC_EN_SHIFT) & 0x1;
992 }
993
994 static inline u8 nvmet_cc_css(u32 cc)
995 {
996 return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
997 }
998
999 static inline u8 nvmet_cc_mps(u32 cc)
1000 {
1001 return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1002 }
1003
1004 static inline u8 nvmet_cc_ams(u32 cc)
1005 {
1006 return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1007 }
1008
1009 static inline u8 nvmet_cc_shn(u32 cc)
1010 {
1011 return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1012 }
1013
1014 static inline u8 nvmet_cc_iosqes(u32 cc)
1015 {
1016 return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1017 }
1018
1019 static inline u8 nvmet_cc_iocqes(u32 cc)
1020 {
1021 return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1022 }
1023
1024 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1025 {
1026 lockdep_assert_held(&ctrl->lock);
1027
1028 if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1029 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
1030 nvmet_cc_mps(ctrl->cc) != 0 ||
1031 nvmet_cc_ams(ctrl->cc) != 0 ||
1032 nvmet_cc_css(ctrl->cc) != 0) {
1033 ctrl->csts = NVME_CSTS_CFS;
1034 return;
1035 }
1036
1037 ctrl->csts = NVME_CSTS_RDY;
1038
1039 /*
1040 * Controllers that are not yet enabled should not really enforce the
1041 * keep alive timeout, but we still want to track a timeout and clean up
1042 * in case a host died before it enabled the controller. Hence, simply
1043 * reset the keep alive timer when the controller is enabled.
1044 */
1045 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1046 }
1047
1048 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1049 {
1050 lockdep_assert_held(&ctrl->lock);
1051
1052 /* XXX: tear down queues? */
1053 ctrl->csts &= ~NVME_CSTS_RDY;
1054 ctrl->cc = 0;
1055 }
1056
1057 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1058 {
1059 u32 old;
1060
1061 mutex_lock(&ctrl->lock);
1062 old = ctrl->cc;
1063 ctrl->cc = new;
1064
1065 if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1066 nvmet_start_ctrl(ctrl);
1067 if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1068 nvmet_clear_ctrl(ctrl);
1069 if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1070 nvmet_clear_ctrl(ctrl);
1071 ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1072 }
1073 if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1074 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1075 mutex_unlock(&ctrl->lock);
1076 }
1077
1078 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1079 {
1080 /* command sets supported: NVMe command set: */
1081 ctrl->cap = (1ULL << 37);
1082 /* CC.EN timeout in 500msec units: */
1083 ctrl->cap |= (15ULL << 24);
1084 /* maximum queue entries supported: */
1085 ctrl->cap |= NVMET_QUEUE_SIZE - 1;
1086 }
1087
1088 u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
1089 struct nvmet_req *req, struct nvmet_ctrl **ret)
1090 {
1091 struct nvmet_subsys *subsys;
1092 struct nvmet_ctrl *ctrl;
1093 u16 status = 0;
1094
1095 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1096 if (!subsys) {
1097 pr_warn("connect request for invalid subsystem %s!\n",
1098 subsysnqn);
1099 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1100 return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1101 }
1102
1103 mutex_lock(&subsys->lock);
1104 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1105 if (ctrl->cntlid == cntlid) {
1106 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1107 pr_warn("hostnqn mismatch.\n");
1108 continue;
1109 }
1110 if (!kref_get_unless_zero(&ctrl->ref))
1111 continue;
1112
1113 *ret = ctrl;
1114 goto out;
1115 }
1116 }
1117
1118 pr_warn("could not find controller %d for subsys %s / host %s\n",
1119 cntlid, subsysnqn, hostnqn);
1120 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1121 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1122
1123 out:
1124 mutex_unlock(&subsys->lock);
1125 nvmet_subsys_put(subsys);
1126 return status;
1127 }
1128
1129 u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
1130 {
1131 if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1132 pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1133 cmd->common.opcode, req->sq->qid);
1134 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1135 }
1136
1137 if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1138 pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1139 cmd->common.opcode, req->sq->qid);
1140 return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1141 }
1142 return 0;
1143 }
1144
1145 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1146 {
1147 struct nvmet_host_link *p;
1148
1149 lockdep_assert_held(&nvmet_config_sem);
1150
1151 if (subsys->allow_any_host)
1152 return true;
1153
1154 if (subsys->type == NVME_NQN_DISC)
1155 return true;
1156
1157 list_for_each_entry(p, &subsys->hosts, entry) {
1158 if (!strcmp(nvmet_host_name(p->host), hostnqn))
1159 return true;
1160 }
1161
1162 return false;
1163 }
1164
1165 /*
1166 * Note: ctrl->subsys->lock should be held when calling this function
1167 */
1168 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1169 struct nvmet_req *req)
1170 {
1171 struct nvmet_ns *ns;
1172
1173 if (!req->p2p_client)
1174 return;
1175
1176 ctrl->p2p_client = get_device(req->p2p_client);
1177
1178 list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link,
1179 lockdep_is_held(&ctrl->subsys->lock))
1180 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1181 }
1182
1183 /*
1184 * Note: ctrl->subsys->lock should be held when calling this function
1185 */
1186 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1187 {
1188 struct radix_tree_iter iter;
1189 void __rcu **slot;
1190
1191 radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1192 pci_dev_put(radix_tree_deref_slot(slot));
1193
1194 put_device(ctrl->p2p_client);
1195 }
1196
1197 static void nvmet_fatal_error_handler(struct work_struct *work)
1198 {
1199 struct nvmet_ctrl *ctrl =
1200 container_of(work, struct nvmet_ctrl, fatal_err_work);
1201
1202 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1203 ctrl->ops->delete_ctrl(ctrl);
1204 }
1205
1206 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1207 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1208 {
1209 struct nvmet_subsys *subsys;
1210 struct nvmet_ctrl *ctrl;
1211 int ret;
1212 u16 status;
1213
1214 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1215 subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1216 if (!subsys) {
1217 pr_warn("connect request for invalid subsystem %s!\n",
1218 subsysnqn);
1219 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1220 goto out;
1221 }
1222
1223 status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1224 down_read(&nvmet_config_sem);
1225 if (!nvmet_host_allowed(subsys, hostnqn)) {
1226 pr_info("connect by host %s for subsystem %s not allowed\n",
1227 hostnqn, subsysnqn);
1228 req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1229 up_read(&nvmet_config_sem);
1230 status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1231 goto out_put_subsystem;
1232 }
1233 up_read(&nvmet_config_sem);
1234
1235 status = NVME_SC_INTERNAL;
1236 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1237 if (!ctrl)
1238 goto out_put_subsystem;
1239 mutex_init(&ctrl->lock);
1240
1241 nvmet_init_cap(ctrl);
1242
1243 ctrl->port = req->port;
1244
1245 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1246 INIT_LIST_HEAD(&ctrl->async_events);
1247 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1248 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1249
1250 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1251 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1252
1253 kref_init(&ctrl->ref);
1254 ctrl->subsys = subsys;
1255 WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1256
1257 ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1258 sizeof(__le32), GFP_KERNEL);
1259 if (!ctrl->changed_ns_list)
1260 goto out_free_ctrl;
1261
1262 ctrl->cqs = kcalloc(subsys->max_qid + 1,
1263 sizeof(struct nvmet_cq *),
1264 GFP_KERNEL);
1265 if (!ctrl->cqs)
1266 goto out_free_changed_ns_list;
1267
1268 ctrl->sqs = kcalloc(subsys->max_qid + 1,
1269 sizeof(struct nvmet_sq *),
1270 GFP_KERNEL);
1271 if (!ctrl->sqs)
1272 goto out_free_cqs;
1273
1274 ret = ida_simple_get(&cntlid_ida,
1275 NVME_CNTLID_MIN, NVME_CNTLID_MAX,
1276 GFP_KERNEL);
1277 if (ret < 0) {
1278 status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
1279 goto out_free_sqs;
1280 }
1281 ctrl->cntlid = ret;
1282
1283 ctrl->ops = req->ops;
1284
1285 /*
1286 * Discovery controllers may use some arbitrary high value
1287 * in order to clean up stale discovery sessions
1288 */
1289 if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
1290 kato = NVMET_DISC_KATO_MS;
1291
1292 /* keep-alive timeout in seconds */
1293 ctrl->kato = DIV_ROUND_UP(kato, 1000);
1294
1295 ctrl->err_counter = 0;
1296 spin_lock_init(&ctrl->error_lock);
1297
1298 nvmet_start_keep_alive_timer(ctrl);
1299
1300 mutex_lock(&subsys->lock);
1301 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1302 nvmet_setup_p2p_ns_map(ctrl, req);
1303 mutex_unlock(&subsys->lock);
1304
1305 *ctrlp = ctrl;
1306 return 0;
1307
1308 out_free_sqs:
1309 kfree(ctrl->sqs);
1310 out_free_cqs:
1311 kfree(ctrl->cqs);
1312 out_free_changed_ns_list:
1313 kfree(ctrl->changed_ns_list);
1314 out_free_ctrl:
1315 kfree(ctrl);
1316 out_put_subsystem:
1317 nvmet_subsys_put(subsys);
1318 out:
1319 return status;
1320 }
1321
1322 static void nvmet_ctrl_free(struct kref *ref)
1323 {
1324 struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1325 struct nvmet_subsys *subsys = ctrl->subsys;
1326
1327 mutex_lock(&subsys->lock);
1328 nvmet_release_p2p_ns_map(ctrl);
1329 list_del(&ctrl->subsys_entry);
1330 mutex_unlock(&subsys->lock);
1331
1332 nvmet_stop_keep_alive_timer(ctrl);
1333
1334 flush_work(&ctrl->async_event_work);
1335 cancel_work_sync(&ctrl->fatal_err_work);
1336
1337 ida_simple_remove(&cntlid_ida, ctrl->cntlid);
1338
1339 kfree(ctrl->sqs);
1340 kfree(ctrl->cqs);
1341 kfree(ctrl->changed_ns_list);
1342 kfree(ctrl);
1343
1344 nvmet_subsys_put(subsys);
1345 }
1346
1347 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1348 {
1349 kref_put(&ctrl->ref, nvmet_ctrl_free);
1350 }
1351
1352 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1353 {
1354 mutex_lock(&ctrl->lock);
1355 if (!(ctrl->csts & NVME_CSTS_CFS)) {
1356 ctrl->csts |= NVME_CSTS_CFS;
1357 schedule_work(&ctrl->fatal_err_work);
1358 }
1359 mutex_unlock(&ctrl->lock);
1360 }
1361 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1362
1363 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1364 const char *subsysnqn)
1365 {
1366 struct nvmet_subsys_link *p;
1367
1368 if (!port)
1369 return NULL;
1370
1371 if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1372 if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1373 return NULL;
1374 return nvmet_disc_subsys;
1375 }
1376
1377 down_read(&nvmet_config_sem);
1378 list_for_each_entry(p, &port->subsystems, entry) {
1379 if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1380 NVMF_NQN_SIZE)) {
1381 if (!kref_get_unless_zero(&p->subsys->ref))
1382 break;
1383 up_read(&nvmet_config_sem);
1384 return p->subsys;
1385 }
1386 }
1387 up_read(&nvmet_config_sem);
1388 return NULL;
1389 }
1390
1391 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1392 enum nvme_subsys_type type)
1393 {
1394 struct nvmet_subsys *subsys;
1395
1396 subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1397 if (!subsys)
1398 return ERR_PTR(-ENOMEM);
1399
1400 subsys->ver = NVME_VS(1, 3, 0);
1401
1402 get_random_bytes(&subsys->serial, sizeof(subsys->serial));
1403
1404 switch (type) {
1405 case NVME_NQN_NVME:
1406 subsys->max_qid = NVMET_NR_QUEUES;
1407 break;
1408 case NVME_NQN_DISC:
1409 subsys->max_qid = 0;
1410 break;
1411 default:
1412 pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1413 kfree(subsys);
1414 return ERR_PTR(-EINVAL);
1415 }
1416 subsys->type = type;
1417 subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1418 GFP_KERNEL);
1419 if (!subsys->subsysnqn) {
1420 kfree(subsys);
1421 return ERR_PTR(-ENOMEM);
1422 }
1423
1424 kref_init(&subsys->ref);
1425
1426 mutex_init(&subsys->lock);
1427 INIT_LIST_HEAD(&subsys->namespaces);
1428 INIT_LIST_HEAD(&subsys->ctrls);
1429 INIT_LIST_HEAD(&subsys->hosts);
1430
1431 return subsys;
1432 }
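/*
 * Sketch of how the configfs layer (configfs.c) typically builds a target
 * using the constructors above; names and error handling are simplified:
 *
 *	subsys = nvmet_subsys_alloc("nqn.2014-08.org.example:sub0", NVME_NQN_NVME);
 *	ns = nvmet_ns_alloc(subsys, 1);
 *	ns->device_path = kstrdup("/dev/nvme0n1", GFP_KERNEL);
 *	ret = nvmet_ns_enable(ns);
 */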
1433
1434 static void nvmet_subsys_free(struct kref *ref)
1435 {
1436 struct nvmet_subsys *subsys =
1437 container_of(ref, struct nvmet_subsys, ref);
1438
1439 WARN_ON_ONCE(!list_empty(&subsys->namespaces));
1440
1441 kfree(subsys->subsysnqn);
1442 kfree(subsys);
1443 }
1444
1445 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1446 {
1447 struct nvmet_ctrl *ctrl;
1448
1449 mutex_lock(&subsys->lock);
1450 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1451 ctrl->ops->delete_ctrl(ctrl);
1452 mutex_unlock(&subsys->lock);
1453 }
1454
1455 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1456 {
1457 kref_put(&subsys->ref, nvmet_subsys_free);
1458 }
1459
1460 static int __init nvmet_init(void)
1461 {
1462 int error;
1463
1464 nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1465
1466 buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1467 WQ_MEM_RECLAIM, 0);
1468 if (!buffered_io_wq) {
1469 error = -ENOMEM;
1470 goto out;
1471 }
1472
1473 error = nvmet_init_discovery();
1474 if (error)
1475 goto out_free_work_queue;
1476
1477 error = nvmet_init_configfs();
1478 if (error)
1479 goto out_exit_discovery;
1480 return 0;
1481
1482 out_exit_discovery:
1483 nvmet_exit_discovery();
1484 out_free_work_queue:
1485 destroy_workqueue(buffered_io_wq);
1486 out:
1487 return error;
1488 }
1489
1490 static void __exit nvmet_exit(void)
1491 {
1492 nvmet_exit_configfs();
1493 nvmet_exit_discovery();
1494 ida_destroy(&cntlid_ida);
1495 destroy_workqueue(buffered_io_wq);
1496
1497 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1498 BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1499 }
1500
1501 module_init(nvmet_init);
1502 module_exit(nvmet_exit);
1503
1504 MODULE_LICENSE("GPL v2");