Lines matching refs:bdev
148 static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) in scm_permit_request() argument
150 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; in scm_permit_request()
183 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_prepare() local
184 struct scm_device *scmdev = bdev->gendisk->private_data; in scm_request_prepare()
220 static inline void scm_request_init(struct scm_blk_dev *bdev, in scm_request_init() argument
229 aobrq->scmdev = bdev->scmdev; in scm_request_init()
232 scmrq->bdev = bdev; in scm_request_init()
240 static void scm_ensure_queue_restart(struct scm_blk_dev *bdev) in scm_ensure_queue_restart() argument
242 if (atomic_read(&bdev->queued_reqs)) { in scm_ensure_queue_restart()
246 blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY); in scm_ensure_queue_restart()
251 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_requeue() local
256 blk_requeue_request(bdev->rq, scmrq->request[i]); in scm_request_requeue()
258 atomic_dec(&bdev->queued_reqs); in scm_request_requeue()
260 scm_ensure_queue_restart(bdev); in scm_request_requeue()
265 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_finish() local
272 atomic_dec(&bdev->queued_reqs); in scm_request_finish()
278 struct scm_blk_dev *bdev = scmrq->bdev; in scm_request_start() local
281 atomic_inc(&bdev->queued_reqs); in scm_request_start()
298 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); in scm_blk_request() local
310 if (!scm_permit_request(bdev, req)) in scm_blk_request()
319 scm_request_init(bdev, scmrq); in scm_blk_request()
340 atomic_inc(&bdev->queued_reqs); in scm_blk_request()
367 scm_ensure_queue_restart(bdev); in scm_blk_request()
390 struct scm_blk_dev *bdev = scmrq->bdev; in scm_blk_irq() local
396 spin_lock(&bdev->lock); in scm_blk_irq()
397 list_add_tail(&scmrq->list, &bdev->finished_requests); in scm_blk_irq()
398 spin_unlock(&bdev->lock); in scm_blk_irq()
399 tasklet_hi_schedule(&bdev->tasklet); in scm_blk_irq()
404 struct scm_blk_dev *bdev = scmrq->bdev; in scm_blk_handle_error() local
413 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_handle_error()
414 if (bdev->state != SCM_WR_PROHIBIT) in scm_blk_handle_error()
416 (unsigned long) bdev->scmdev->address); in scm_blk_handle_error()
417 bdev->state = SCM_WR_PROHIBIT; in scm_blk_handle_error()
418 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_handle_error()
429 spin_lock_irqsave(&bdev->rq_lock, flags); in scm_blk_handle_error()
431 spin_unlock_irqrestore(&bdev->rq_lock, flags); in scm_blk_handle_error()
434 static void scm_blk_tasklet(struct scm_blk_dev *bdev) in scm_blk_tasklet() argument
439 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_tasklet()
440 while (!list_empty(&bdev->finished_requests)) { in scm_blk_tasklet()
441 scmrq = list_first_entry(&bdev->finished_requests, in scm_blk_tasklet()
444 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_tasklet()
450 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_tasklet()
456 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_tasklet()
461 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_tasklet()
463 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_tasklet()
465 blk_run_queue(bdev->rq); in scm_blk_tasklet()
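The matches from scm_blk_irq() and scm_blk_tasklet() above outline the driver's completion path: the interrupt handler only parks the finished request on bdev->finished_requests and schedules the tasklet, which drains that list (dropping bdev->lock while it completes each request) and then restarts the block queue. The sketch below reconstructs that flow from the matched lines only; the _sketch names are placeholders, the struct scm_request layout and the list_del()/scm_request_finish() details are assumed from context, and the real functions additionally deal with errors, retries and clustered requests.

/* Interrupt side: defer all real work to the tasklet. */
static void scm_blk_irq_sketch(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}

/* Tasklet side: drain finished_requests, then poke the queue. */
static void scm_blk_tasklet_sketch(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		/* Drop the lock while completing this request. */
		spin_unlock_irqrestore(&bdev->lock, flags);
		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);

	/* Look for further requests on the (pre-blk-mq) request queue. */
	blk_run_queue(bdev->rq);
}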
472 int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) in scm_blk_dev_setup() argument
485 bdev->scmdev = scmdev; in scm_blk_dev_setup()
486 bdev->state = SCM_OPER; in scm_blk_dev_setup()
487 spin_lock_init(&bdev->rq_lock); in scm_blk_dev_setup()
488 spin_lock_init(&bdev->lock); in scm_blk_dev_setup()
489 INIT_LIST_HEAD(&bdev->finished_requests); in scm_blk_dev_setup()
490 atomic_set(&bdev->queued_reqs, 0); in scm_blk_dev_setup()
491 tasklet_init(&bdev->tasklet, in scm_blk_dev_setup()
493 (unsigned long) bdev); in scm_blk_dev_setup()
495 rq = blk_init_queue(scm_blk_request, &bdev->rq_lock); in scm_blk_dev_setup()
499 bdev->rq = rq; in scm_blk_dev_setup()
508 scm_blk_dev_cluster_setup(bdev); in scm_blk_dev_setup()
510 bdev->gendisk = alloc_disk(SCM_NR_PARTS); in scm_blk_dev_setup()
511 if (!bdev->gendisk) in scm_blk_dev_setup()
515 bdev->gendisk->driverfs_dev = &scmdev->dev; in scm_blk_dev_setup()
516 bdev->gendisk->private_data = scmdev; in scm_blk_dev_setup()
517 bdev->gendisk->fops = &scm_blk_devops; in scm_blk_dev_setup()
518 bdev->gendisk->queue = rq; in scm_blk_dev_setup()
519 bdev->gendisk->major = scm_major; in scm_blk_dev_setup()
520 bdev->gendisk->first_minor = devindex * SCM_NR_PARTS; in scm_blk_dev_setup()
522 len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm"); in scm_blk_dev_setup()
524 len += snprintf(bdev->gendisk->disk_name + len, in scm_blk_dev_setup()
529 snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c", in scm_blk_dev_setup()
533 set_capacity(bdev->gendisk, scmdev->size >> 9); in scm_blk_dev_setup()
534 add_disk(bdev->gendisk); in scm_blk_dev_setup()
544 void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) in scm_blk_dev_cleanup() argument
546 tasklet_kill(&bdev->tasklet); in scm_blk_dev_cleanup()
547 del_gendisk(bdev->gendisk); in scm_blk_dev_cleanup()
548 blk_cleanup_queue(bdev->gendisk->queue); in scm_blk_dev_cleanup()
549 put_disk(bdev->gendisk); in scm_blk_dev_cleanup()
552 void scm_blk_set_available(struct scm_blk_dev *bdev) in scm_blk_set_available() argument
556 spin_lock_irqsave(&bdev->lock, flags); in scm_blk_set_available()
557 if (bdev->state == SCM_WR_PROHIBIT) in scm_blk_set_available()
559 (unsigned long) bdev->scmdev->address); in scm_blk_set_available()
560 bdev->state = SCM_OPER; in scm_blk_set_available()
561 spin_unlock_irqrestore(&bdev->lock, flags); in scm_blk_set_available()
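Taken together, the matches above reference every struct scm_blk_dev member this listing is concerned with. For orientation, the following is a sketch of the structure as implied by those references, not a copy of the driver's scm_blk.h; the real definition may order members differently and carry additional (e.g. cluster-write) state.

/* Members of struct scm_blk_dev as implied by the references above. */
struct scm_blk_dev {
	struct tasklet_struct tasklet;            /* runs the scm_blk_tasklet() drain loop */
	struct request_queue *rq;                 /* request queue from blk_init_queue() */
	struct gendisk *gendisk;                  /* "scm..." disk with SCM_NR_PARTS minors */
	struct scm_device *scmdev;                /* underlying storage-class memory device */
	spinlock_t rq_lock;                       /* protects the request queue */
	spinlock_t lock;                          /* protects state and finished_requests */
	atomic_t queued_reqs;                     /* requests currently handed to the device */
	enum { SCM_OPER, SCM_WR_PROHIBIT } state; /* write-prohibit state machine */
	struct list_head finished_requests;       /* completed requests, drained by the tasklet */
};

The state member is what scm_permit_request() checks before letting writes through, what scm_blk_handle_error() switches to SCM_WR_PROHIBIT, and what scm_blk_set_available() resets to SCM_OPER.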