Lines Matching refs:dev

Cross-reference listing: each entry gives the source line number, the code on that line, and the enclosing function; "argument" and "local" mark whether dev is a function parameter or a local variable there. The code appears to be the LightNVM core (drivers/lightnvm/core.c in kernels of that era).

74 void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,  in nvm_dev_dma_alloc()  argument
77 return dev->ops->dev_dma_alloc(dev, dev->ppalist_pool, mem_flags, in nvm_dev_dma_alloc()
82 void nvm_dev_dma_free(struct nvm_dev *dev, void *ppa_list, in nvm_dev_dma_free() argument
85 dev->ops->dev_dma_free(dev->ppalist_pool, ppa_list, dma_handler); in nvm_dev_dma_free()
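
These two helpers only forward to the driver's DMA callbacks through the per-device ppalist_pool. A minimal sketch of how a caller would pair them around a request follows; the rqd->ppa_list and rqd->dma_ppa_list field names are assumptions based on the struct nvm_rq of that era, not something visible in this listing.

    /*
     * Hedged sketch (not part of the file above): allocate and release the
     * DMA-able PPA list of a request via the helpers at lines 74/82.
     * The nvm_rq field names are assumptions.
     */
    static int example_alloc_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
    {
            rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
                                              &rqd->dma_ppa_list);
            if (!rqd->ppa_list)
                    return -ENOMEM;
            /* ... fill rqd->ppa_list[] with physical page addresses ... */
            return 0;
    }

    static void example_free_ppalist(struct nvm_dev *dev, struct nvm_rq *rqd)
    {
            nvm_dev_dma_free(dev, rqd->ppa_list, rqd->dma_ppa_list);
    }
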
100 struct nvmm_type *nvm_init_mgr(struct nvm_dev *dev) in nvm_init_mgr() argument
108 ret = mt->register_mgr(dev); in nvm_init_mgr()
111 ret, dev->name); in nvm_init_mgr()
122 struct nvm_dev *dev; in nvm_register_mgr() local
134 list_for_each_entry(dev, &nvm_devices, devices) { in nvm_register_mgr()
135 if (dev->mt) in nvm_register_mgr()
138 dev->mt = nvm_init_mgr(dev); in nvm_register_mgr()
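
nvm_init_mgr() tries a registered manager against a device, and nvm_register_mgr() re-walks nvm_devices whenever a new manager appears. A hedged sketch of the manager side follows; it only fills in the struct nvmm_type members that this listing actually dereferences, and every example_* callback is a placeholder to be supplied by the manager.

    /*
     * Hedged sketch of a media manager as nvm_register_mgr()/nvm_init_mgr()
     * expect it.  Only members visible in this listing are initialised.
     */
    static struct nvmm_type example_mgr = {
            .name           = "example",
            .version        = {0, 1, 0},
            .register_mgr   = example_register_mgr,
            .unregister_mgr = example_unregister_mgr,
            .get_blk        = example_get_blk,
            .put_blk        = example_put_blk,
            .submit_io      = example_submit_io,
            .erase_blk      = example_erase_blk,
            .lun_info_print = example_lun_info_print,
    };

    static int __init example_mgr_module_init(void)
    {
            /* walks nvm_devices and calls nvm_init_mgr() per device, as above */
            return nvm_register_mgr(&example_mgr);
    }
    module_init(example_mgr_module_init);
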
160 struct nvm_dev *dev; in nvm_find_nvm_dev() local
162 list_for_each_entry(dev, &nvm_devices, devices) in nvm_find_nvm_dev()
163 if (!strcmp(name, dev->name)) in nvm_find_nvm_dev()
164 return dev; in nvm_find_nvm_dev()
169 struct nvm_block *nvm_get_blk(struct nvm_dev *dev, struct nvm_lun *lun, in nvm_get_blk() argument
172 return dev->mt->get_blk(dev, lun, flags); in nvm_get_blk()
177 void nvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) in nvm_put_blk() argument
179 return dev->mt->put_blk(dev, blk); in nvm_put_blk()
183 int nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd) in nvm_submit_io() argument
185 return dev->mt->submit_io(dev, rqd); in nvm_submit_io()
189 int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk) in nvm_erase_blk() argument
191 return dev->mt->erase_blk(dev, blk, 0); in nvm_erase_blk()
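
The four wrappers above simply forward to the device's media manager, so their intended use is clearest from the caller's side. Below is a hedged sketch of the get/erase/put cycle a target would drive; the flags value of 0 and the NULL-on-failure convention of nvm_get_blk() are assumptions based on contemporary callers.

    /* Hedged sketch: block lifecycle driven through the wrappers above. */
    static void example_block_cycle(struct nvm_dev *dev, struct nvm_lun *lun)
    {
            struct nvm_block *blk;

            blk = nvm_get_blk(dev, lun, 0);
            if (!blk)
                    return;

            /* ... build nvm_rq's against blk and nvm_submit_io() them ... */

            nvm_erase_blk(dev, blk);        /* wipe the block */
            nvm_put_blk(dev, blk);          /* return it to the media manager */
    }
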
195 static int nvm_core_init(struct nvm_dev *dev) in nvm_core_init() argument
197 struct nvm_id *id = &dev->identity; in nvm_core_init()
201 dev->nr_chnls = grp->num_ch; in nvm_core_init()
202 dev->luns_per_chnl = grp->num_lun; in nvm_core_init()
203 dev->pgs_per_blk = grp->num_pg; in nvm_core_init()
204 dev->blks_per_lun = grp->num_blk; in nvm_core_init()
205 dev->nr_planes = grp->num_pln; in nvm_core_init()
206 dev->sec_size = grp->csecs; in nvm_core_init()
207 dev->oob_size = grp->sos; in nvm_core_init()
208 dev->sec_per_pg = grp->fpg_sz / grp->csecs; in nvm_core_init()
209 memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); in nvm_core_init()
211 dev->plane_mode = NVM_PLANE_SINGLE; in nvm_core_init()
212 dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; in nvm_core_init()
225 dev->plane_mode = NVM_PLANE_DOUBLE; in nvm_core_init()
227 dev->plane_mode = NVM_PLANE_QUAD; in nvm_core_init()
230 dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes; in nvm_core_init()
231 dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk; in nvm_core_init()
232 dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun; in nvm_core_init()
233 dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls; in nvm_core_init()
235 dev->total_blocks = dev->nr_planes * in nvm_core_init()
236 dev->blks_per_lun * in nvm_core_init()
237 dev->luns_per_chnl * in nvm_core_init()
238 dev->nr_chnls; in nvm_core_init()
239 dev->total_pages = dev->total_blocks * dev->pgs_per_blk; in nvm_core_init()
240 INIT_LIST_HEAD(&dev->online_targets); in nvm_core_init()
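
A worked example makes the derived geometry above easier to sanity-check. Assume a hypothetical device reporting 2 channels, 4 LUNs per channel, 2 planes, 1024 blocks per LUN, 256 pages per block, a 16384-byte flash page (fpg_sz) and 4096-byte sectors (csecs); the calculations at lines 208 and 230-239 then give:

    sec_per_pg   = fpg_sz / csecs             = 16384 / 4096  = 4
    sec_per_pl   = sec_per_pg * nr_planes     = 4 * 2         = 8
    sec_per_blk  = sec_per_pl * pgs_per_blk   = 8 * 256       = 2048
    sec_per_lun  = sec_per_blk * blks_per_lun = 2048 * 1024   = 2097152
    nr_luns      = luns_per_chnl * nr_chnls   = 4 * 2         = 8
    total_blocks = nr_planes * blks_per_lun * luns_per_chnl * nr_chnls
                 = 2 * 1024 * 4 * 2           = 16384
    total_pages  = total_blocks * pgs_per_blk = 16384 * 256   = 4194304

Note that total_blocks includes the plane factor, so it counts plane-blocks rather than multi-plane erase groups.
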
245 static void nvm_free(struct nvm_dev *dev) in nvm_free() argument
247 if (!dev) in nvm_free()
250 if (dev->mt) in nvm_free()
251 dev->mt->unregister_mgr(dev); in nvm_free()
254 static int nvm_init(struct nvm_dev *dev) in nvm_init() argument
258 if (!dev->q || !dev->ops) in nvm_init()
261 if (dev->ops->identity(dev, &dev->identity)) { in nvm_init()
267 dev->identity.ver_id, dev->identity.vmnt, in nvm_init()
268 dev->identity.cgrps); in nvm_init()
270 if (dev->identity.ver_id != 1) { in nvm_init()
275 if (dev->identity.cgrps != 1) { in nvm_init()
280 ret = nvm_core_init(dev); in nvm_init()
287 dev->name, dev->sec_per_pg, dev->nr_planes, in nvm_init()
288 dev->pgs_per_blk, dev->blks_per_lun, dev->nr_luns, in nvm_init()
289 dev->nr_chnls); in nvm_init()
296 static void nvm_exit(struct nvm_dev *dev) in nvm_exit() argument
298 if (dev->ppalist_pool) in nvm_exit()
299 dev->ops->destroy_dma_pool(dev->ppalist_pool); in nvm_exit()
300 nvm_free(dev); in nvm_exit()
308 struct nvm_dev *dev; in nvm_register() local
314 dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL); in nvm_register()
315 if (!dev) in nvm_register()
318 dev->q = q; in nvm_register()
319 dev->ops = ops; in nvm_register()
320 strncpy(dev->name, disk_name, DISK_NAME_LEN); in nvm_register()
322 ret = nvm_init(dev); in nvm_register()
326 if (dev->ops->max_phys_sect > 256) { in nvm_register()
332 if (dev->ops->max_phys_sect > 1) { in nvm_register()
333 dev->ppalist_pool = dev->ops->create_dma_pool(dev, "ppalist"); in nvm_register()
334 if (!dev->ppalist_pool) { in nvm_register()
343 dev->mt = nvm_init_mgr(dev); in nvm_register()
344 list_add(&dev->devices, &nvm_devices); in nvm_register()
349 kfree(dev); in nvm_register()
356 struct nvm_dev *dev; in nvm_unregister() local
359 dev = nvm_find_nvm_dev(disk_name); in nvm_unregister()
360 if (!dev) { in nvm_unregister()
367 list_del(&dev->devices); in nvm_unregister()
370 nvm_exit(dev); in nvm_unregister()
371 kfree(dev); in nvm_unregister()
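
For context on the registration pair above, a hedged sketch of the driver side: a block driver exposing an open-channel device fills a struct nvm_dev_ops with callbacks such as the ones this listing dereferences (identity, create/destroy_dma_pool, dev_dma_alloc/free, max_phys_sect; the real ops table has further members not visible here) and registers its request queue under the disk name. All example_* names are placeholders.

    /* Hedged sketch: only ops members visible in this listing are shown. */
    static struct nvm_dev_ops example_dev_ops = {
            .identity         = example_identity,
            .create_dma_pool  = example_create_dma_pool,
            .destroy_dma_pool = example_destroy_dma_pool,
            .dev_dma_alloc    = example_dev_dma_alloc,
            .dev_dma_free     = example_dev_dma_free,
            .max_phys_sect    = 64, /* > 256 is rejected at line 326 above */
    };

    static int example_probe_lightnvm(struct request_queue *q,
                                      struct gendisk *disk)
    {
            return nvm_register(q, disk->disk_name, &example_dev_ops);
    }

    static void example_remove_lightnvm(struct gendisk *disk)
    {
            nvm_unregister(disk->disk_name);
    }
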
379 static int nvm_create_target(struct nvm_dev *dev, in nvm_create_target() argument
389 if (!dev->mt) { in nvm_create_target()
402 list_for_each_entry(t, &dev->online_targets, list) { in nvm_create_target()
415 tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); in nvm_create_target()
431 targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end); in nvm_create_target()
438 blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect); in nvm_create_target()
447 list_add_tail(&t->list, &dev->online_targets); in nvm_create_target()
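
The tt->init() call at line 431 is the hand-off to the target type: it receives the device, the freshly allocated gendisk and the LUN range, and returns the target's private data. A hedged sketch of such a hook follows; the struct and function names are invented, and returning ERR_PTR() on failure is an assumption based on how targets of that era reported errors.

    /* Hedged sketch of the init hook invoked by nvm_create_target(). */
    struct example_tgt {
            struct nvm_dev *dev;
            int lun_begin;
            int lun_end;
    };

    static void *example_tgt_init(struct nvm_dev *dev, struct gendisk *tdisk,
                                  int lun_begin, int lun_end)
    {
            struct example_tgt *t;

            t = kzalloc(sizeof(*t), GFP_KERNEL);
            if (!t)
                    return ERR_PTR(-ENOMEM);

            t->dev = dev;
            t->lun_begin = lun_begin;
            t->lun_end = lun_end;
            return t;       /* becomes the target's private data */
    }
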
482 struct nvm_dev *dev; in __nvm_configure_create() local
486 dev = nvm_find_nvm_dev(create->dev); in __nvm_configure_create()
488 if (!dev) { in __nvm_configure_create()
499 if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) { in __nvm_configure_create()
501 s->lun_begin, s->lun_end, dev->nr_luns); in __nvm_configure_create()
505 return nvm_create_target(dev, create); in __nvm_configure_create()
511 struct nvm_dev *dev; in __nvm_configure_remove() local
515 list_for_each_entry(dev, &nvm_devices, devices) in __nvm_configure_remove()
516 list_for_each_entry(t, &dev->online_targets, list) { in __nvm_configure_remove()
536 struct nvm_dev *dev; in nvm_configure_show() local
547 dev = nvm_find_nvm_dev(devname); in nvm_configure_show()
549 if (!dev) { in nvm_configure_show()
554 if (!dev->mt) in nvm_configure_show()
557 dev->mt->lun_info_print(dev); in nvm_configure_show()
585 ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev, in nvm_configure_create()
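
The format string at line 585 defines the debug configure interface: a one-character opcode, the device name, and (per the full source, not visible in this listing) the target name, target type and a begin:end LUN range. A hedged example of a create line, assuming 'a' is the create opcode and rrpc is an available target type:

    a nvme0n1 tgt0 rrpc 0:3

Written to the module's configure parameter, such a line is dispatched (per the full source) on the opcode: 'a' lands in nvm_configure_create() above, 'd' in the remove path and 's' in nvm_configure_show(). The exact sysfs path of the parameter depends on how the core is built and is not shown here.
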
634 struct nvm_dev *dev; in nvm_configure_get() local
638 list_for_each_entry(dev, &nvm_devices, devices) { in nvm_configure_get()
641 buf += sprintf(buf, " %32s\n", dev->name); in nvm_configure_get()
705 struct nvm_dev *dev; in nvm_ioctl_get_devices() local
716 list_for_each_entry(dev, &nvm_devices, devices) { in nvm_ioctl_get_devices()
719 sprintf(info->devname, "%s", dev->name); in nvm_ioctl_get_devices()
720 if (dev->mt) { in nvm_ioctl_get_devices()
721 info->bmversion[0] = dev->mt->version[0]; in nvm_ioctl_get_devices()
722 info->bmversion[1] = dev->mt->version[1]; in nvm_ioctl_get_devices()
723 info->bmversion[2] = dev->mt->version[2]; in nvm_ioctl_get_devices()
724 sprintf(info->bmname, "%s", dev->mt->name); in nvm_ioctl_get_devices()
759 create.dev[DISK_NAME_LEN - 1] = '\0'; in nvm_ioctl_dev_create()
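
Finally, a hedged userspace sketch of the ioctl path that ends in nvm_ioctl_get_devices() and nvm_ioctl_dev_create() above. The control-node path, ioctl name and struct nvm_ioctl_create layout follow the lightnvm uapi header of that period and should be treated as assumptions if your tree differs.

    /* Hedged userspace sketch: create a target over LUNs 0..3 of nvme0n1. */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/lightnvm.h>

    int main(void)
    {
            struct nvm_ioctl_create c;
            int fd, ret;

            memset(&c, 0, sizeof(c));
            strncpy(c.dev, "nvme0n1", DISK_NAME_LEN - 1); /* looked up by nvm_find_nvm_dev() */
            strncpy(c.tgtname, "tgt0", DISK_NAME_LEN - 1);
            strncpy(c.tgttype, "rrpc", NVM_TTYPE_NAME_MAX - 1);
            c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
            c.conf.s.lun_begin = 0;
            c.conf.s.lun_end = 3;

            fd = open("/dev/lightnvm/control", O_RDWR);
            if (fd < 0)
                    return 1;
            ret = ioctl(fd, NVM_DEV_CREATE, &c);
            close(fd);
            return ret ? 1 : 0;
    }

The kernel-side handler then terminates create.dev as at line 759 before handing the request to __nvm_configure_create().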