Lines Matching refs:ccp

46 static inline void ccp_add_device(struct ccp_device *ccp) in ccp_add_device() argument
48 ccp_dev = ccp; in ccp_add_device()
51 static inline void ccp_del_device(struct ccp_device *ccp) in ccp_del_device() argument
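Taken together, lines 46-51 are the registration pair: ccp_add_device() stashes the device in what is evidently a file-scope singleton pointer, and ccp_del_device() presumably clears it. A minimal sketch; only the line-48 assignment is in the matches, so the ccp_dev variable name and the NULL store are assumptions (kernel context assumed throughout these sketches: linux/kernel.h, linux/spinlock.h, linux/kthread.h, linux/interrupt.h, linux/hw_random.h, and the driver's own ccp headers):

static struct ccp_device *ccp_dev;      /* assumed file-scope singleton */

static inline void ccp_add_device(struct ccp_device *ccp)
{
        ccp_dev = ccp;                  /* line 48 */
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
        ccp_dev = NULL;                 /* assumed; body not in the matches */
}

A matching ccp_get_device() accessor presumably returns ccp_dev; line 93 below calls it.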
93 struct ccp_device *ccp = ccp_get_device(); in ccp_enqueue_cmd() local
98 if (!ccp) in ccp_enqueue_cmd()
105 cmd->ccp = ccp; in ccp_enqueue_cmd()
107 spin_lock_irqsave(&ccp->cmd_lock, flags); in ccp_enqueue_cmd()
109 i = ccp->cmd_q_count; in ccp_enqueue_cmd()
111 if (ccp->cmd_count >= MAX_CMD_QLEN) { in ccp_enqueue_cmd()
114 list_add_tail(&cmd->entry, &ccp->backlog); in ccp_enqueue_cmd()
117 ccp->cmd_count++; in ccp_enqueue_cmd()
118 list_add_tail(&cmd->entry, &ccp->cmd); in ccp_enqueue_cmd()
121 if (!ccp->suspending) { in ccp_enqueue_cmd()
122 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_enqueue_cmd()
123 if (ccp->cmd_q[i].active) in ccp_enqueue_cmd()
131 spin_unlock_irqrestore(&ccp->cmd_lock, flags); in ccp_enqueue_cmd()
134 if (i < ccp->cmd_q_count) in ccp_enqueue_cmd()
135 wake_up_process(ccp->cmd_q[i].kthread); in ccp_enqueue_cmd()
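Lines 93-135 trace the submit path of ccp_enqueue_cmd(): look up the device, take cmd_lock, either push the command onto the backlog (queue full) or onto ccp->cmd, pick an idle queue while still under the lock, and wake its kthread after dropping it. A reconstruction; the return values, the may-backlog check, and any command validation are not in the matches and are assumptions:

int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp = ccp_get_device();      /* line 93 */
        unsigned long flags;
        unsigned int i;
        int ret;

        if (!ccp)                                       /* line 98 */
                return -ENODEV;                         /* assumed */

        cmd->ccp = ccp;                                 /* line 105 */

        spin_lock_irqsave(&ccp->cmd_lock, flags);       /* line 107 */

        i = ccp->cmd_q_count;                           /* line 109: "no wakeup" sentinel */

        if (ccp->cmd_count >= MAX_CMD_QLEN) {           /* line 111: queue is full */
                ret = -EBUSY;                           /* assumed */
                /* real code presumably gates this on a may-backlog flag */
                list_add_tail(&cmd->entry, &ccp->backlog);      /* line 114 */
        } else {
                ret = -EINPROGRESS;                     /* assumed */
                ccp->cmd_count++;                       /* line 117 */
                list_add_tail(&cmd->entry, &ccp->cmd);  /* line 118 */

                /* don't wake anyone while a suspend is in flight */
                if (!ccp->suspending) {                 /* line 121 */
                        for (i = 0; i < ccp->cmd_q_count; i++) {        /* line 122 */
                                if (ccp->cmd_q[i].active)               /* line 123 */
                                        continue;
                                break;                  /* found an idle queue */
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);  /* line 131 */

        if (i < ccp->cmd_q_count)                       /* line 134 */
                wake_up_process(ccp->cmd_q[i].kthread); /* line 135 */

        return ret;
}

Note the sentinel set at line 109: if the command was backlogged, or a suspend is pending, i stays at cmd_q_count and no kthread is woken.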
144 struct ccp_device *ccp = cmd->ccp; in ccp_do_cmd_backlog() local
150 spin_lock_irqsave(&ccp->cmd_lock, flags); in ccp_do_cmd_backlog()
152 ccp->cmd_count++; in ccp_do_cmd_backlog()
153 list_add_tail(&cmd->entry, &ccp->cmd); in ccp_do_cmd_backlog()
156 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_do_cmd_backlog()
157 if (ccp->cmd_q[i].active) in ccp_do_cmd_backlog()
163 spin_unlock_irqrestore(&ccp->cmd_lock, flags); in ccp_do_cmd_backlog()
166 if (i < ccp->cmd_q_count) in ccp_do_cmd_backlog()
167 wake_up_process(ccp->cmd_q[i].kthread); in ccp_do_cmd_backlog()
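ccp_do_cmd_backlog() (lines 144-167) replays the tail of the enqueue path once a backlogged command is promoted: re-add it to ccp->cmd under the lock, then wake an idle queue. A sketch, assuming it runs as deferred work reached via container_of() and that the submitter is notified through cmd->callback; neither detail is in the matches:

static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);  /* assumed */
        struct ccp_device *ccp = cmd->ccp;              /* line 144 */
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);         /* assumed: "now in progress" */

        spin_lock_irqsave(&ccp->cmd_lock, flags);       /* line 150 */

        ccp->cmd_count++;                               /* line 152 */
        list_add_tail(&cmd->entry, &ccp->cmd);          /* line 153 */

        for (i = 0; i < ccp->cmd_q_count; i++) {        /* line 156 */
                if (ccp->cmd_q[i].active)               /* line 157 */
                        continue;
                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);  /* line 163 */

        if (i < ccp->cmd_q_count)                       /* line 166 */
                wake_up_process(ccp->cmd_q[i].kthread); /* line 167 */
}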
172 struct ccp_device *ccp = cmd_q->ccp; in ccp_dequeue_cmd() local
177 spin_lock_irqsave(&ccp->cmd_lock, flags); in ccp_dequeue_cmd()
181 if (ccp->suspending) { in ccp_dequeue_cmd()
184 spin_unlock_irqrestore(&ccp->cmd_lock, flags); in ccp_dequeue_cmd()
185 wake_up_interruptible(&ccp->suspend_queue); in ccp_dequeue_cmd()
190 if (ccp->cmd_count) { in ccp_dequeue_cmd()
193 cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); in ccp_dequeue_cmd()
196 ccp->cmd_count--; in ccp_dequeue_cmd()
199 if (!list_empty(&ccp->backlog)) { in ccp_dequeue_cmd()
200 backlog = list_first_entry(&ccp->backlog, struct ccp_cmd, in ccp_dequeue_cmd()
205 spin_unlock_irqrestore(&ccp->cmd_lock, flags); in ccp_dequeue_cmd()
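ccp_dequeue_cmd() (lines 172-205) is the consumer each queue kthread runs: under cmd_lock it first honors a pending suspend (mark this queue suspended, wake the suspend waiter, return nothing), otherwise it pops the head of ccp->cmd and also pulls one entry off the backlog for re-submission. Reconstructed; the list_del() calls, the suspended flag write, and the hand-off to ccp_do_cmd_backlog() are assumptions:

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;            /* line 172 */
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);       /* line 177 */

        if (ccp->suspending) {                          /* line 181 */
                cmd_q->suspended = 1;                   /* assumed; see line 571 */

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);  /* line 184 */
                wake_up_interruptible(&ccp->suspend_queue);     /* line 185 */

                return NULL;                            /* assumed */
        }

        if (ccp->cmd_count) {                           /* line 190 */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); /* line 193 */
                list_del(&cmd->entry);                  /* assumed */
                ccp->cmd_count--;                       /* line 196 */
        }

        if (!list_empty(&ccp->backlog)) {               /* line 199 */
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);      /* line 200 */
                list_del(&backlog->entry);              /* assumed */
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);  /* line 205 */

        if (backlog)                                    /* assumed: promote via */
                schedule_work(&backlog->work);          /* ccp_do_cmd_backlog() */

        return cmd;
}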
262 struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); in ccp_trng_read() local
270 trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); in ccp_trng_read()
276 if (ccp->hwrng_retries++ > TRNG_RETRIES) in ccp_trng_read()
283 ccp->hwrng_retries = 0; in ccp_trng_read()
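ccp_trng_read() (lines 262-283) is the hw_random read hook: one ioread32() of TRNG_OUT_REG per call, where zero means no entropy was available; after more than TRNG_RETRIES consecutive zero reads it gives up, and any nonzero read resets the retry counter. A sketch assuming the standard struct hwrng read signature and the copy-out, neither of which is in the matches:

static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); /* line 262 */
        u32 trng_value;
        int len = min_t(int, sizeof(u32), max);         /* assumed */

        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);     /* line 270 */
        if (!trng_value) {
                /* zero = no data; fail hard after too many empty reads */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)        /* line 276 */
                        return -EIO;                    /* assumed */
                return 0;                               /* assumed */
        }

        ccp->hwrng_retries = 0;                         /* line 283 */
        memcpy(data, &trng_value, len);                 /* assumed */

        return len;
}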
296 struct ccp_device *ccp; in ccp_alloc_struct() local
298 ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL); in ccp_alloc_struct()
299 if (!ccp) in ccp_alloc_struct()
301 ccp->dev = dev; in ccp_alloc_struct()
303 INIT_LIST_HEAD(&ccp->cmd); in ccp_alloc_struct()
304 INIT_LIST_HEAD(&ccp->backlog); in ccp_alloc_struct()
306 spin_lock_init(&ccp->cmd_lock); in ccp_alloc_struct()
307 mutex_init(&ccp->req_mutex); in ccp_alloc_struct()
308 mutex_init(&ccp->ksb_mutex); in ccp_alloc_struct()
309 ccp->ksb_count = KSB_COUNT; in ccp_alloc_struct()
310 ccp->ksb_start = 0; in ccp_alloc_struct()
312 return ccp; in ccp_alloc_struct()
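ccp_alloc_struct() (lines 296-312) is covered almost line-for-line by the matches: a device-managed zeroed allocation, list heads for the pending and backlog command lists, the cmd_lock spinlock, two mutexes, and the key storage block (KSB) accounting. Only the NULL return on allocation failure is assumed:

struct ccp_device *ccp_alloc_struct(struct device *dev)
{
        struct ccp_device *ccp;                 /* line 296 */

        ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);      /* line 298 */
        if (!ccp)                               /* line 299 */
                return NULL;                    /* assumed */
        ccp->dev = dev;                         /* line 301 */

        INIT_LIST_HEAD(&ccp->cmd);              /* line 303 */
        INIT_LIST_HEAD(&ccp->backlog);          /* line 304 */

        spin_lock_init(&ccp->cmd_lock);         /* line 306 */
        mutex_init(&ccp->req_mutex);            /* line 307 */
        mutex_init(&ccp->ksb_mutex);            /* line 308 */
        ccp->ksb_count = KSB_COUNT;             /* line 309 */
        ccp->ksb_start = 0;                     /* line 310 */

        return ccp;                             /* line 312 */
}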
320 int ccp_init(struct ccp_device *ccp) in ccp_init() argument
322 struct device *dev = ccp->dev; in ccp_init()
331 qmr = ioread32(ccp->io_regs + Q_MASK_REG); in ccp_init()
347 cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; in ccp_init()
348 ccp->cmd_q_count++; in ccp_init()
350 cmd_q->ccp = ccp; in ccp_init()
355 cmd_q->ksb_key = KSB_START + ccp->ksb_start++; in ccp_init()
356 cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++; in ccp_init()
357 ccp->ksb_count -= 2; in ccp_init()
362 cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE + in ccp_init()
364 cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE + in ccp_init()
378 iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE + in ccp_init()
384 if (ccp->cmd_q_count == 0) { in ccp_init()
389 dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); in ccp_init()
392 iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); in ccp_init()
393 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_init()
394 cmd_q = &ccp->cmd_q[i]; in ccp_init()
399 iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); in ccp_init()
402 ret = ccp->get_irq(ccp); in ccp_init()
409 init_waitqueue_head(&ccp->ksb_queue); in ccp_init()
410 init_waitqueue_head(&ccp->suspend_queue); in ccp_init()
413 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_init()
416 cmd_q = &ccp->cmd_q[i]; in ccp_init()
432 ccp->hwrng.name = "ccp-rng"; in ccp_init()
433 ccp->hwrng.read = ccp_trng_read; in ccp_init()
434 ret = hwrng_register(&ccp->hwrng); in ccp_init()
441 ccp_add_device(ccp); in ccp_init()
444 iowrite32(qim, ccp->io_regs + IRQ_MASK_REG); in ccp_init()
449 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_init()
450 if (ccp->cmd_q[i].kthread) in ccp_init()
451 kthread_stop(ccp->cmd_q[i].kthread); in ccp_init()
453 ccp->free_irq(ccp); in ccp_init()
456 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_init()
457 dma_pool_destroy(ccp->cmd_q[i].dma_pool); in ccp_init()
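ccp_init() (lines 320-457) is the bring-up sequence, and the matches trace its shape: read Q_MASK_REG to discover which hardware queues exist, reserve two KSB regions (key and context) per queue, precompute per-queue register addresses, program the AXI cache attributes, disable and clear interrupts, request the IRQ through the bus-specific get_irq hook, create the wait queues and one kthread per command queue, register the TRNG with hwrng, publish the device, and finally unmask interrupts; the tail (lines 449-457) is the unwind path. A condensed skeleton; the loop bound, register strides, goto labels, error codes, and the dma_pool/kthread details are assumptions or elided:

int ccp_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;                  /* line 322 */
        struct ccp_cmd_queue *cmd_q;
        u32 qmr, qim = 0;
        unsigned int i;
        int ret;

        qmr = ioread32(ccp->io_regs + Q_MASK_REG);      /* line 331 */
        for (i = 0; i < MAX_HW_QUEUES; i++) {           /* assumed bound */
                if (!(qmr & (1 << i)))                  /* assumed */
                        continue;

                /* dma_pool creation for the queue elided */
                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];  /* line 347 */
                ccp->cmd_q_count++;                     /* line 348 */
                cmd_q->ccp = ccp;                       /* line 350 */

                /* two KSB regions per queue: key and context */
                cmd_q->ksb_key = KSB_START + ccp->ksb_start++;  /* line 355 */
                cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;  /* line 356 */
                ccp->ksb_count -= 2;                    /* line 357 */

                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
                                    (CMD_Q_STATUS_INCR * i);    /* line 362; stride assumed */
                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
                                        (CMD_Q_STATUS_INCR * i); /* line 364; stride assumed */
                /* per-queue interrupt bits accumulated into qim (elided) */

                iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
                          (CMD_Q_CACHE_INC * i));       /* line 378; stride assumed */
        }
        if (ccp->cmd_q_count == 0) {                    /* line 384 */
                ret = -EIO;                             /* assumed */
                goto e_pool;                            /* assumed label */
        }
        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count); /* line 389 */

        /* disable and clear interrupts until everything is ready */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);   /* line 392 */
        for (i = 0; i < ccp->cmd_q_count; i++) {        /* line 393 */
                cmd_q = &ccp->cmd_q[i];                 /* line 394 */
                /* per-queue status reads to clear state (elided) */
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);  /* line 399 */

        ret = ccp->get_irq(ccp);                        /* line 402: bus-specific hook */
        if (ret)
                goto e_pool;                            /* assumed */

        init_waitqueue_head(&ccp->ksb_queue);           /* line 409 */
        init_waitqueue_head(&ccp->suspend_queue);       /* line 410 */

        for (i = 0; i < ccp->cmd_q_count; i++) {        /* line 413 */
                cmd_q = &ccp->cmd_q[i];                 /* line 416 */
                /* kthread creation per queue elided; failure would unwind */
        }

        ccp->hwrng.name = "ccp-rng";                    /* line 432 */
        ccp->hwrng.read = ccp_trng_read;                /* line 433 */
        ret = hwrng_register(&ccp->hwrng);              /* line 434 */
        if (ret)
                goto e_kthread;                         /* assumed */

        ccp_add_device(ccp);                            /* line 441: publish */

        iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);    /* line 444: enable interrupts */

        return 0;

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)          /* line 449 */
                if (ccp->cmd_q[i].kthread)              /* line 450 */
                        kthread_stop(ccp->cmd_q[i].kthread);    /* line 451 */

        ccp->free_irq(ccp);                             /* line 453 */

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)          /* line 456 */
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);       /* line 457 */

        return ret;
}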
467 void ccp_destroy(struct ccp_device *ccp) in ccp_destroy() argument
474 ccp_del_device(ccp); in ccp_destroy()
477 hwrng_unregister(&ccp->hwrng); in ccp_destroy()
480 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_destroy()
481 if (ccp->cmd_q[i].kthread) in ccp_destroy()
482 kthread_stop(ccp->cmd_q[i].kthread); in ccp_destroy()
486 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_destroy()
487 cmd_q = &ccp->cmd_q[i]; in ccp_destroy()
492 iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); in ccp_destroy()
493 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_destroy()
494 cmd_q = &ccp->cmd_q[i]; in ccp_destroy()
499 iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG); in ccp_destroy()
501 ccp->free_irq(ccp); in ccp_destroy()
503 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_destroy()
504 dma_pool_destroy(ccp->cmd_q[i].dma_pool); in ccp_destroy()
507 while (!list_empty(&ccp->cmd)) { in ccp_destroy()
509 cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); in ccp_destroy()
513 while (!list_empty(&ccp->backlog)) { in ccp_destroy()
515 cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); in ccp_destroy()
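ccp_destroy() (lines 467-515) mirrors the init sequence in reverse: unpublish the device, unregister the hwrng, stop the queue kthreads, rebuild the interrupt mask, mask and acknowledge interrupts, release the IRQ and DMA pools, and finally flush both command lists, apparently completing each stranded command through its callback. A sketch; the qim accumulation, list_del() calls, and the -ENODEV completion status are assumptions:

void ccp_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        u32 qim = 0;
        unsigned int i;

        ccp_del_device(ccp);                            /* line 474: unpublish */

        hwrng_unregister(&ccp->hwrng);                  /* line 477 */

        for (i = 0; i < ccp->cmd_q_count; i++)          /* line 480 */
                if (ccp->cmd_q[i].kthread)              /* line 481 */
                        kthread_stop(ccp->cmd_q[i].kthread);    /* line 482 */

        for (i = 0; i < ccp->cmd_q_count; i++) {        /* line 486 */
                cmd_q = &ccp->cmd_q[i];                 /* line 487 */
                qim |= cmd_q->int_ok | cmd_q->int_err;  /* assumed */
        }

        /* disable, then acknowledge, all queue interrupts */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);   /* line 492 */
        for (i = 0; i < ccp->cmd_q_count; i++) {        /* line 493 */
                cmd_q = &ccp->cmd_q[i];                 /* line 494 */
                /* per-queue status reads elided */
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);  /* line 499 */

        ccp->free_irq(ccp);                             /* line 501 */

        for (i = 0; i < ccp->cmd_q_count; i++)          /* line 503 */
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);       /* line 504 */

        /* flush anything still queued, failing each command */
        while (!list_empty(&ccp->cmd)) {                /* line 507 */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); /* line 509 */
                list_del(&cmd->entry);                  /* assumed */
                cmd->callback(cmd->data, -ENODEV);      /* assumed */
        }
        while (!list_empty(&ccp->backlog)) {            /* line 513 */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); /* line 515 */
                list_del(&cmd->entry);                  /* assumed */
                cmd->callback(cmd->data, -ENODEV);      /* assumed */
        }
}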
530 struct ccp_device *ccp = dev_get_drvdata(dev); in ccp_irq_handler() local
535 status = ioread32(ccp->io_regs + IRQ_STATUS_REG); in ccp_irq_handler()
537 for (i = 0; i < ccp->cmd_q_count; i++) { in ccp_irq_handler()
538 cmd_q = &ccp->cmd_q[i]; in ccp_irq_handler()
553 iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG); in ccp_irq_handler()
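The interrupt handler (lines 530-553) reads the shared IRQ_STATUS_REG once, then checks each queue's interrupt bits against it, latching per-queue status and acknowledging by writing the bits back. A sketch; the dev-as-cookie convention, the int_ok/int_err masks, and the kthread wakeup are assumptions:

static irqreturn_t ccp_irq_handler(int irq, void *data)
{
        struct device *dev = data;                      /* assumed cookie */
        struct ccp_device *ccp = dev_get_drvdata(dev);  /* line 530 */
        struct ccp_cmd_queue *cmd_q;
        u32 q_int, status;
        unsigned int i;

        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);       /* line 535 */

        for (i = 0; i < ccp->cmd_q_count; i++) {        /* line 537 */
                cmd_q = &ccp->cmd_q[i];                 /* line 538 */

                q_int = status & (cmd_q->int_ok | cmd_q->int_err);      /* assumed */
                if (q_int) {
                        /* latch per-queue status for the kthread (elided) */

                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG); /* line 553: ack */
                        wake_up_interruptible(&cmd_q->int_queue);        /* assumed */
                }
        }

        return IRQ_HANDLED;
}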
562 bool ccp_queues_suspended(struct ccp_device *ccp) in ccp_queues_suspended() argument
568 spin_lock_irqsave(&ccp->cmd_lock, flags); in ccp_queues_suspended()
570 for (i = 0; i < ccp->cmd_q_count; i++) in ccp_queues_suspended()
571 if (ccp->cmd_q[i].suspended) in ccp_queues_suspended()
574 spin_unlock_irqrestore(&ccp->cmd_lock, flags); in ccp_queues_suspended()
576 return ccp->cmd_q_count == suspended; in ccp_queues_suspended()
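ccp_queues_suspended() (lines 562-576) counts, under cmd_lock, how many queues have set the suspended flag that ccp_dequeue_cmd() raises at lines 181-185, and returns true once all of them have parked. Reconstruction; only the counter increment is assumed:

bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);       /* line 568 */

        for (i = 0; i < ccp->cmd_q_count; i++)          /* line 570 */
                if (ccp->cmd_q[i].suspended)            /* line 571 */
                        suspended++;                    /* assumed */

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);  /* line 574 */

        return ccp->cmd_q_count == suspended;           /* line 576 */
}

A suspend handler would then presumably set ccp->suspending and sleep on ccp->suspend_queue until this predicate holds, e.g. wait_event_interruptible(ccp->suspend_queue, ccp_queues_suspended(ccp)).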