This source file includes the following definitions:
- fastrpc_free_map
- fastrpc_map_put
- fastrpc_map_get
- fastrpc_map_find
- fastrpc_buf_free
- fastrpc_buf_alloc
- fastrpc_channel_ctx_free
- fastrpc_channel_ctx_get
- fastrpc_channel_ctx_put
- fastrpc_context_free
- fastrpc_context_get
- fastrpc_context_put
- fastrpc_context_put_wq
- olaps_cmp
- fastrpc_get_buff_overlaps
- fastrpc_context_alloc
- fastrpc_map_dma_buf
- fastrpc_unmap_dma_buf
- fastrpc_release
- fastrpc_dma_buf_attach
- fastrpc_dma_buf_detach
- fastrpc_kmap
- fastrpc_vmap
- fastrpc_mmap
- fastrpc_map_create
- fastrpc_get_meta_size
- fastrpc_get_payload_size
- fastrpc_create_maps
- fastrpc_get_args
- fastrpc_put_args
- fastrpc_invoke_send
- fastrpc_internal_invoke
- fastrpc_init_create_process
- fastrpc_session_alloc
- fastrpc_session_free
- fastrpc_release_current_dsp_process
- fastrpc_device_release
- fastrpc_device_open
- fastrpc_dmabuf_alloc
- fastrpc_init_attach
- fastrpc_invoke
- fastrpc_device_ioctl
- fastrpc_cb_probe
- fastrpc_cb_remove
- fastrpc_rpmsg_probe
- fastrpc_notify_users
- fastrpc_rpmsg_remove
- fastrpc_rpmsg_callback
- fastrpc_init
- fastrpc_exit
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <uapi/misc/fastrpc.h>

#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX		4
#define FASTRPC_MAX_SESSIONS	9
#define FASTRPC_ALIGN		128
#define FASTRPC_MAX_FDLIST	16
#define FASTRPC_MAX_CRCLIST	64
#define FASTRPC_PHYS(p)		((p) & 0xffffffff)
#define FASTRPC_CTX_MAX		(256)
#define FASTRPC_INIT_HANDLE	1
#define FASTRPC_CTXID_MASK	(0xFF0)
#define INIT_FILELEN_MAX	(64 * 1024 * 1024)
#define FASTRPC_DEVICE_NAME	"fastrpc"
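
/*
 * The "scalars" word describes a remote method invocation: bits 29-31
 * carry attributes, bits 24-28 the method index, bits 16-23 the number of
 * input buffers, bits 8-15 the number of output buffers, bits 4-7 the
 * number of input handles and bits 0-3 the number of output handles.
 */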
#define REMOTE_SCALARS_INBUFS(sc)	(((sc) >> 16) & 0x0ff)
#define REMOTE_SCALARS_OUTBUFS(sc)	(((sc) >> 8) & 0x0ff)
#define REMOTE_SCALARS_INHANDLES(sc)	(((sc) >> 4) & 0x0f)
#define REMOTE_SCALARS_OUTHANDLES(sc)	((sc) & 0x0f)

#define REMOTE_SCALARS_LENGTH(sc)	(REMOTE_SCALARS_INBUFS(sc) +   \
					 REMOTE_SCALARS_OUTBUFS(sc) +  \
					 REMOTE_SCALARS_INHANDLES(sc) + \
					 REMOTE_SCALARS_OUTHANDLES(sc))

#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
		((((attr) & 0x07) << 29) |	\
		 (((method) & 0x1f) << 24) |	\
		 (((in) & 0xff) << 16) |	\
		 (((out) & 0xff) << 8) |	\
		 (((oin) & 0x0f) << 4) |	\
		 ((oout) & 0x0f))

#define FASTRPC_SCALARS(method, in, out) \
		FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
#define FASTRPC_CREATE_PROCESS_NARGS	6
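/* Remote Method id table */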
#define FASTRPC_RMID_INIT_ATTACH	0
#define FASTRPC_RMID_INIT_RELEASE	1
#define FASTRPC_RMID_INIT_CREATE	6
#define FASTRPC_RMID_INIT_CREATE_ATTR	7
#define FASTRPC_RMID_INIT_CREATE_STATIC	8

#define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev)

static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
						"sdsp", "cdsp" };
struct fastrpc_phy_page {
	u64 addr;
	u64 size;
};

struct fastrpc_invoke_buf {
	u32 num;
	u32 pgidx;
};

struct fastrpc_remote_arg {
	u64 pv;
	u64 len;
};

struct fastrpc_msg {
	int pid;
	int tid;
	u64 ctx;
	u32 handle;
	u32 sc;
	u64 addr;
	u64 size;
};

struct fastrpc_invoke_rsp {
	u64 ctx;
	int retval;
};

struct fastrpc_buf_overlap {
	u64 start;
	u64 end;
	int raix;
	u64 mstart;
	u64 mend;
	u64 offset;
};

struct fastrpc_buf {
	struct fastrpc_user *fl;
	struct dma_buf *dmabuf;
	struct device *dev;
	void *virt;
	u64 phys;
	u64 size;

	struct mutex lock;
	struct list_head attachments;
};

struct fastrpc_dma_buf_attachment {
	struct device *dev;
	struct sg_table sgt;
	struct list_head node;
};

struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	struct kref refcount;
};

struct fastrpc_invoke_ctx {
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 *crc;
	u64 ctxid;
	u64 msg_sz;
	struct kref refcount;
	struct list_head node;
	struct completion work;
	struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
};

struct fastrpc_session_ctx {
	struct device *dev;
	int sid;
	bool used;
	bool valid;
};

struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	struct rpmsg_device *rpdev;
	struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
	spinlock_t lock;
	struct idr ctx_idr;
	struct list_head users;
	struct miscdevice miscdev;
	struct kref refcount;
};

struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;

	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sctx;
	struct fastrpc_buf *init_mem;

	int tgid;
	int pd;

	spinlock_t lock;

	struct mutex mutex;
};
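
/* kref release callback: tear down the dma-buf mapping and free the map */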
static void fastrpc_free_map(struct kref *ref)
{
	struct fastrpc_map *map;

	map = container_of(ref, struct fastrpc_map, refcount);

	if (map->table) {
		dma_buf_unmap_attachment(map->attach, map->table,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(map->buf, map->attach);
		dma_buf_put(map->buf);
	}

	kfree(map);
}

static void fastrpc_map_put(struct fastrpc_map *map)
{
	if (map)
		kref_put(&map->refcount, fastrpc_free_map);
}

static void fastrpc_map_get(struct fastrpc_map *map)
{
	if (map)
		kref_get(&map->refcount);
}

static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
			    struct fastrpc_map **ppmap)
{
	struct fastrpc_map *map = NULL;

	mutex_lock(&fl->mutex);
	list_for_each_entry(map, &fl->maps, node) {
		if (map->fd == fd) {
			fastrpc_map_get(map);
			*ppmap = map;
			mutex_unlock(&fl->mutex);
			return 0;
		}
	}
	mutex_unlock(&fl->mutex);

	return -ENOENT;
}

static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
	dma_free_coherent(buf->dev, buf->size, buf->virt,
			  FASTRPC_PHYS(buf->phys));
	kfree(buf);
}
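
/*
 * Allocate a DMA coherent buffer; a non-zero session id is folded into
 * the upper 32 bits of the device address, which the DSP side uses to
 * identify the SMMU context the buffer was mapped through.
 */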
static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
			     u64 size, struct fastrpc_buf **obuf)
{
	struct fastrpc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	INIT_LIST_HEAD(&buf->attachments);
	mutex_init(&buf->lock);

	buf->fl = fl;
	buf->virt = NULL;
	buf->phys = 0;
	buf->size = size;
	buf->dev = dev;

	buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
				       GFP_KERNEL);
	if (!buf->virt) {
		mutex_destroy(&buf->lock);
		kfree(buf);
		return -ENOMEM;
	}

	if (fl->sctx && fl->sctx->sid)
		buf->phys += ((u64)fl->sctx->sid << 32);

	*obuf = buf;

	return 0;
}
static void fastrpc_channel_ctx_free(struct kref *ref)
{
	struct fastrpc_channel_ctx *cctx;

	cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);

	kfree(cctx);
}

static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
	kref_get(&cctx->refcount);
}

static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
	kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}

static void fastrpc_context_free(struct kref *ref)
{
	struct fastrpc_invoke_ctx *ctx;
	struct fastrpc_channel_ctx *cctx;
	unsigned long flags;
	int i;

	ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
	cctx = ctx->cctx;

	for (i = 0; i < ctx->nscalars; i++)
		fastrpc_map_put(ctx->maps[i]);

	if (ctx->buf)
		fastrpc_buf_free(ctx->buf);

	spin_lock_irqsave(&cctx->lock, flags);
	idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
	spin_unlock_irqrestore(&cctx->lock, flags);

	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	fastrpc_channel_ctx_put(cctx);
}

static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
	kref_get(&ctx->refcount);
}

static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
	kref_put(&ctx->refcount, fastrpc_context_free);
}

static void fastrpc_context_put_wq(struct work_struct *work)
{
	struct fastrpc_invoke_ctx *ctx =
			container_of(work, struct fastrpc_invoke_ctx, put_work);

	fastrpc_context_put(ctx);
}
#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)

static int olaps_cmp(const void *a, const void *b)
{
	const struct fastrpc_buf_overlap *pa = a;
	const struct fastrpc_buf_overlap *pb = b;
	int st = CMP(pa->start, pb->start);
	int ed = CMP(pb->end, pa->end);

	return st == 0 ? ed : st;
}
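
/*
 * Work out, for each buffer argument, the sub-range that still needs its
 * own backing in the inline payload: buffers are sorted by start address
 * and any byte range already covered by an earlier buffer is recorded as
 * an offset, so overlapping user buffers are packed and copied only once.
 */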
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
	u64 max_end = 0;
	int i;

	for (i = 0; i < ctx->nbufs; ++i) {
		ctx->olaps[i].start = ctx->args[i].ptr;
		ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
		ctx->olaps[i].raix = i;
	}

	sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);

	for (i = 0; i < ctx->nbufs; ++i) {
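		/* Falling inside previous range */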
		if (ctx->olaps[i].start < max_end) {
			ctx->olaps[i].mstart = max_end;
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].offset = max_end - ctx->olaps[i].start;

			if (ctx->olaps[i].end > max_end) {
				max_end = ctx->olaps[i].end;
			} else {
				ctx->olaps[i].mend = 0;
				ctx->olaps[i].mstart = 0;
			}

		} else {
			ctx->olaps[i].mend = ctx->olaps[i].end;
			ctx->olaps[i].mstart = ctx->olaps[i].start;
			ctx->olaps[i].offset = 0;
			max_end = ctx->olaps[i].end;
		}
	}
}
static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
			struct fastrpc_user *user, u32 kernel, u32 sc,
			struct fastrpc_invoke_args *args)
{
	struct fastrpc_channel_ctx *cctx = user->cctx;
	struct fastrpc_invoke_ctx *ctx = NULL;
	unsigned long flags;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctx->node);
	ctx->fl = user;
	ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
	ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
		     REMOTE_SCALARS_OUTBUFS(sc);

	if (ctx->nscalars) {
		ctx->maps = kcalloc(ctx->nscalars,
				    sizeof(*ctx->maps), GFP_KERNEL);
		if (!ctx->maps) {
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->olaps = kcalloc(ctx->nscalars,
				     sizeof(*ctx->olaps), GFP_KERNEL);
		if (!ctx->olaps) {
			kfree(ctx->maps);
			kfree(ctx);
			return ERR_PTR(-ENOMEM);
		}
		ctx->args = args;
		fastrpc_get_buff_overlaps(ctx);
	}
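
	/* Released in fastrpc_context_free() */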
	fastrpc_channel_ctx_get(cctx);

	ctx->sc = sc;
	ctx->retval = -1;
	ctx->pid = current->pid;
	ctx->tgid = user->tgid;
	ctx->cctx = cctx;
	init_completion(&ctx->work);
	INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);

	spin_lock(&user->lock);
	list_add_tail(&ctx->node, &user->pending);
	spin_unlock(&user->lock);

	spin_lock_irqsave(&cctx->lock, flags);
	ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
			       FASTRPC_CTX_MAX, GFP_ATOMIC);
	if (ret < 0) {
		spin_unlock_irqrestore(&cctx->lock, flags);
		goto err_idr;
	}
	ctx->ctxid = ret << 4;
	spin_unlock_irqrestore(&cctx->lock, flags);

	kref_init(&ctx->refcount);

	return ctx;
err_idr:
	spin_lock(&user->lock);
	list_del(&ctx->node);
	spin_unlock(&user->lock);
	fastrpc_channel_ctx_put(cctx);
	kfree(ctx->maps);
	kfree(ctx->olaps);
	kfree(ctx);

	return ERR_PTR(ret);
}
static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
		    enum dma_data_direction dir)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct sg_table *table;

	table = &a->sgt;

	if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir))
		return ERR_PTR(-ENOMEM);

	return table;
}

static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *table,
				  enum dma_data_direction dir)
{
	dma_unmap_sg(attach->dev, table->sgl, table->nents, dir);
}

static void fastrpc_release(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buffer = dmabuf->priv;

	fastrpc_buf_free(buffer);
}

static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a;
	struct fastrpc_buf *buffer = dmabuf->priv;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
			      FASTRPC_PHYS(buffer->phys), buffer->size);
	if (ret < 0) {
		dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
		kfree(a);
		return -EINVAL;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->node);
	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->node, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}
static void fastrpc_dma_buf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attachment)
{
	struct fastrpc_dma_buf_attachment *a = attachment->priv;
	struct fastrpc_buf *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->node);
	mutex_unlock(&buffer->lock);
	sg_free_table(&a->sgt);
	kfree(a);
}
static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	return buf->virt ? buf->virt + pgnum * PAGE_SIZE : NULL;
}

static void *fastrpc_vmap(struct dma_buf *dmabuf)
{
	struct fastrpc_buf *buf = dmabuf->priv;

	return buf->virt;
}
static int fastrpc_mmap(struct dma_buf *dmabuf,
			struct vm_area_struct *vma)
{
	struct fastrpc_buf *buf = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return dma_mmap_coherent(buf->dev, vma, buf->virt,
				 FASTRPC_PHYS(buf->phys), size);
}
static const struct dma_buf_ops fastrpc_dma_buf_ops = {
	.attach = fastrpc_dma_buf_attach,
	.detach = fastrpc_dma_buf_detach,
	.map_dma_buf = fastrpc_map_dma_buf,
	.unmap_dma_buf = fastrpc_unmap_dma_buf,
	.mmap = fastrpc_mmap,
	.map = fastrpc_kmap,
	.vmap = fastrpc_vmap,
	.release = fastrpc_release,
};
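
/*
 * Import the dma-buf behind @fd and map it into the session's device;
 * an existing map for the same fd is reused with its refcount bumped.
 */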
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
			      u64 len, struct fastrpc_map **ppmap)
{
	struct fastrpc_session_ctx *sess = fl->sctx;
	struct fastrpc_map *map = NULL;
	int err = 0;

	if (!fastrpc_map_find(fl, fd, ppmap))
		return 0;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	INIT_LIST_HEAD(&map->node);
	map->fl = fl;
	map->fd = fd;
	map->buf = dma_buf_get(fd);
	if (IS_ERR(map->buf)) {
		err = PTR_ERR(map->buf);
		goto get_err;
	}

	map->attach = dma_buf_attach(map->buf, sess->dev);
	if (IS_ERR(map->attach)) {
		dev_err(sess->dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(map->attach);
		goto attach_err;
	}

	map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(map->table)) {
		err = PTR_ERR(map->table);
		goto map_err;
	}

	map->phys = sg_dma_address(map->table->sgl);
	map->phys += ((u64)fl->sctx->sid << 32);
	map->size = len;
	map->va = sg_virt(map->table->sgl);
	map->len = len;
	kref_init(&map->refcount);

	spin_lock(&fl->lock);
	list_add_tail(&map->node, &fl->maps);
	spin_unlock(&fl->lock);
	*ppmap = map;

	return 0;

map_err:
	dma_buf_detach(map->buf, map->attach);
attach_err:
	dma_buf_put(map->buf);
get_err:
	kfree(map);

	return err;
}
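
/*
 * Fastrpc payload buffer with metadata looks like:
 *
 * >>>>>>  START of METADATA <<<<<<<<<
 * +---------------------------------+
 * |           Arguments             |
 * | type:(struct fastrpc_remote_arg)|
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Invoke Buffer list      |
 * | type:(struct fastrpc_invoke_buf)|
 * |           (0 - N)               |
 * +---------------------------------+
 * |         Page info list          |
 * | type:(struct fastrpc_phy_page)  |
 * |             (0 - N)             |
 * +---------------------------------+
 * |         Optional info           |
 * |(can be specific to SoC/Firmware)|
 * +---------------------------------+
 * >>>>>>>>  END of METADATA <<<<<<<<<
 * +---------------------------------+
 * |         Inline ARGS             |
 * |            (0-N)                |
 * +---------------------------------+
 * >>>>>>>>  END of PAYLOAD <<<<<<<<<
 */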
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
	int size = 0;

	size = (sizeof(struct fastrpc_remote_arg) +
		sizeof(struct fastrpc_invoke_buf) +
		sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
		sizeof(u64) * FASTRPC_MAX_FDLIST +
		sizeof(u32) * FASTRPC_MAX_CRCLIST;

	return size;
}

static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
	u64 size = 0;
	int i;

	size = ALIGN(metalen, FASTRPC_ALIGN);
	for (i = 0; i < ctx->nscalars; i++) {
		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

			if (ctx->olaps[i].offset == 0)
				size = ALIGN(size, FASTRPC_ALIGN);

			size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
		}
	}

	return size;
}
static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	int i, err;

	for (i = 0; i < ctx->nscalars; ++i) {

		if (ctx->args[i].reserved)
			return -EINVAL;

		if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
		    ctx->args[i].length == 0)
			continue;

		err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
					 ctx->args[i].length, &ctx->maps[i]);
		if (err) {
			dev_err(dev, "Error Creating map %d\n", err);
			return -EINVAL;
		}
	}
	return 0;
}
static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
	struct device *dev = ctx->fl->sctx->dev;
	struct fastrpc_remote_arg *rpra;
	struct fastrpc_invoke_buf *list;
	struct fastrpc_phy_page *pages;
	int inbufs, i, oix, err = 0;
	u64 len, rlen, pkt_size;
	u64 pg_start, pg_end;
	uintptr_t args;
	int metalen;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
	metalen = fastrpc_get_meta_size(ctx);
	pkt_size = fastrpc_get_payload_size(ctx, metalen);

	err = fastrpc_create_maps(ctx);
	if (err)
		return err;

	ctx->msg_sz = pkt_size;

	err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
	if (err)
		return err;

	rpra = ctx->buf->virt;
	list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra);
	pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) +
		sizeof(*rpra));
	args = (uintptr_t)ctx->buf->virt + metalen;
	rlen = pkt_size - metalen;
	ctx->rpra = rpra;
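
	/*
	 * Buffers backed by an fd are already mapped through their dma-buf;
	 * plain user buffers are packed into the inline area after the
	 * metadata, honouring the overlap ranges computed earlier.
	 */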
	for (oix = 0; oix < ctx->nbufs; ++oix) {
		int mlen;

		i = ctx->olaps[oix].raix;
		len = ctx->args[i].length;

		rpra[i].pv = 0;
		rpra[i].len = len;
		list[i].num = len ? 1 : 0;
		list[i].pgidx = i;

		if (!len)
			continue;
		if (ctx->maps[i]) {
			struct vm_area_struct *vma = NULL;

			rpra[i].pv = (u64) ctx->args[i].ptr;
			pages[i].addr = ctx->maps[i]->phys;

			/* the mmap semaphore must be held across find_vma() */
			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm, ctx->args[i].ptr);
			if (vma)
				pages[i].addr += ctx->args[i].ptr -
						 vma->vm_start;
			up_read(&current->mm->mmap_sem);

			pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
				  PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
		} else {

			if (ctx->olaps[oix].offset == 0) {
				rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
				args = ALIGN(args, FASTRPC_ALIGN);
			}

			mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;

			if (rlen < mlen)
				goto bail;

			rpra[i].pv = args - ctx->olaps[oix].offset;
			pages[i].addr = ctx->buf->phys -
					ctx->olaps[oix].offset +
					(pkt_size - rlen);
			pages[i].addr = pages[i].addr & PAGE_MASK;

			pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
			pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
			pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
			args = args + mlen;
			rlen -= mlen;
		}

		if (i < inbufs && !ctx->maps[i]) {
			void *dst = (void *)(uintptr_t)rpra[i].pv;
			void *src = (void *)(uintptr_t)ctx->args[i].ptr;

			if (!kernel) {
				if (copy_from_user(dst, (void __user *)src,
						   len)) {
					err = -EFAULT;
					goto bail;
				}
			} else {
				memcpy(dst, src, len);
			}
		}
	}
	for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
		rpra[i].pv = (u64) ctx->args[i].ptr;
		rpra[i].len = ctx->args[i].length;
		list[i].num = ctx->args[i].length ? 1 : 0;
		list[i].pgidx = i;
		pages[i].addr = ctx->maps[i]->phys;
		pages[i].size = ctx->maps[i]->size;
	}

bail:
	if (err)
		dev_err(dev, "Error: get invoke args failed:%d\n", err);

	return err;
}
static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
			    u32 kernel)
{
	struct fastrpc_remote_arg *rpra = ctx->rpra;
	int i, inbufs;

	inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);

	for (i = inbufs; i < ctx->nbufs; ++i) {
		void *src = (void *)(uintptr_t)rpra[i].pv;
		void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
		u64 len = rpra[i].len;

		if (!kernel) {
			if (copy_to_user((void __user *)dst, src, len))
				return -EFAULT;
		} else {
			memcpy(dst, src, len);
		}
	}

	return 0;
}
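
/*
 * Build the rpmsg message for an invocation and post it to the remote
 * processor; an extra context reference is taken here so the context
 * stays alive until the DSP's response is handled.
 */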
static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
			       struct fastrpc_invoke_ctx *ctx,
			       u32 kernel, u32 handle)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_user *fl = ctx->fl;
	struct fastrpc_msg *msg = &ctx->msg;

	cctx = fl->cctx;
	msg->pid = fl->tgid;
	msg->tid = current->pid;

	if (kernel)
		msg->pid = 0;

	msg->ctx = ctx->ctxid | fl->pd;
	msg->handle = handle;
	msg->sc = ctx->sc;
	msg->addr = ctx->buf ? ctx->buf->phys : 0;
	msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
	fastrpc_context_get(ctx);

	return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
}
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
				   u32 handle, u32 sc,
				   struct fastrpc_invoke_args *args)
{
	struct fastrpc_invoke_ctx *ctx = NULL;
	int err = 0;

	if (!fl->sctx)
		return -EINVAL;

	if (!fl->cctx->rpdev)
		return -EPIPE;

	ctx = fastrpc_context_alloc(fl, kernel, sc, args);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx->nscalars) {
		err = fastrpc_get_args(kernel, ctx);
		if (err)
			goto bail;
	}
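
	/* make sure that all CPU memory writes are seen by DSP */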
	dma_wmb();
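	/* Send invoke buffer to remote dsp */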
	err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
	if (err)
		goto bail;
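
	/* Wait for remote dsp to respond or time out */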
	err = wait_for_completion_interruptible(&ctx->work);
	if (err)
		goto bail;
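
	/* Check the response from remote dsp */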
	err = ctx->retval;
	if (err)
		goto bail;

	if (ctx->nscalars) {
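		/* make sure that all memory writes by DSP are seen by CPU */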
		dma_rmb();
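		/* populate all the output buffers with results */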
		err = fastrpc_put_args(ctx, kernel);
		if (err)
			goto bail;
	}

bail:
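	/* We are done with this compute context, release it */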
	spin_lock(&fl->lock);
	list_del(&ctx->node);
	spin_unlock(&fl->lock);
	fastrpc_context_put(ctx);

	if (err)
		dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);

	return err;
}
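
/*
 * Spawn a new dynamic process on the DSP: map the executable passed from
 * userspace, allocate its initial memory and issue the CREATE (or
 * CREATE_ATTR) remote method on the init handle.
 */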
static int fastrpc_init_create_process(struct fastrpc_user *fl,
				       char __user *argp)
{
	struct fastrpc_init_create init;
	struct fastrpc_invoke_args *args;
	struct fastrpc_phy_page pages[1];
	struct fastrpc_map *map = NULL;
	struct fastrpc_buf *imem = NULL;
	int memlen;
	int err;
	struct {
		int pgid;
		u32 namelen;
		u32 filelen;
		u32 pageslen;
		u32 attrs;
		u32 siglen;
	} inbuf;
	u32 sc;

	args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
	if (!args)
		return -ENOMEM;

	if (copy_from_user(&init, argp, sizeof(init))) {
		err = -EFAULT;
		goto err;
	}

	if (init.filelen > INIT_FILELEN_MAX) {
		err = -EINVAL;
		goto err;
	}

	inbuf.pgid = fl->tgid;
	inbuf.namelen = strlen(current->comm) + 1;
	inbuf.filelen = init.filelen;
	inbuf.pageslen = 1;
	inbuf.attrs = init.attrs;
	inbuf.siglen = init.siglen;
	fl->pd = 1;

	if (init.filelen && init.filefd) {
		err = fastrpc_map_create(fl, init.filefd, init.filelen, &map);
		if (err)
			goto err;
	}

	memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
		       1024 * 1024);
	err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
				&imem);
	if (err)
		goto err_alloc;

	fl->init_mem = imem;
	args[0].ptr = (u64)(uintptr_t)&inbuf;
	args[0].length = sizeof(inbuf);
	args[0].fd = -1;

	args[1].ptr = (u64)(uintptr_t)current->comm;
	args[1].length = inbuf.namelen;
	args[1].fd = -1;

	args[2].ptr = (u64) init.file;
	args[2].length = inbuf.filelen;
	args[2].fd = init.filefd;

	pages[0].addr = imem->phys;
	pages[0].size = imem->size;

	args[3].ptr = (u64)(uintptr_t) pages;
	args[3].length = 1 * sizeof(*pages);
	args[3].fd = -1;

	args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
	args[4].length = sizeof(inbuf.attrs);
	args[4].fd = -1;

	args[5].ptr = (u64)(uintptr_t)&inbuf.siglen;
	args[5].length = sizeof(inbuf.siglen);
	args[5].fd = -1;

	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
	if (init.attrs)
		sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0);

	err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				      sc, args);
	if (err)
		goto err_invoke;

	kfree(args);

	return 0;

err_invoke:
	fl->init_mem = NULL;
	fastrpc_buf_free(imem);
err_alloc:
	if (map) {
		spin_lock(&fl->lock);
		list_del(&map->node);
		spin_unlock(&fl->lock);
		fastrpc_map_put(map);
	}
err:
	kfree(args);

	return err;
}
static struct fastrpc_session_ctx *fastrpc_session_alloc(
					struct fastrpc_channel_ctx *cctx)
{
	struct fastrpc_session_ctx *session = NULL;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cctx->lock, flags);
	for (i = 0; i < cctx->sesscount; i++) {
		if (!cctx->session[i].used && cctx->session[i].valid) {
			cctx->session[i].used = true;
			session = &cctx->session[i];
			break;
		}
	}
	spin_unlock_irqrestore(&cctx->lock, flags);

	return session;
}

static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
				 struct fastrpc_session_ctx *session)
{
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	session->used = false;
	spin_unlock_irqrestore(&cctx->lock, flags);
}
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = 0;
	u32 sc;

	tgid = fl->tgid;
	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	struct fastrpc_channel_ctx *cctx = fl->cctx;
	struct fastrpc_invoke_ctx *ctx, *n;
	struct fastrpc_map *map, *m;
	unsigned long flags;

	fastrpc_release_current_dsp_process(fl);

	spin_lock_irqsave(&cctx->lock, flags);
	list_del(&fl->user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (fl->init_mem)
		fastrpc_buf_free(fl->init_mem);

	list_for_each_entry_safe(ctx, n, &fl->pending, node) {
		list_del(&ctx->node);
		fastrpc_context_put(ctx);
	}

	list_for_each_entry_safe(map, m, &fl->maps, node) {
		list_del(&map->node);
		fastrpc_map_put(map);
	}

	fastrpc_session_free(cctx, fl->sctx);
	fastrpc_channel_ctx_put(cctx);

	mutex_destroy(&fl->mutex);
	kfree(fl);
	file->private_data = NULL;

	return 0;
}
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
	struct fastrpc_channel_ctx *cctx = miscdev_to_cctx(filp->private_data);
	struct fastrpc_user *fl = NULL;
	unsigned long flags;

	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		return -ENOMEM;
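
	/* Released in fastrpc_device_release() */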
	fastrpc_channel_ctx_get(cctx);

	filp->private_data = fl;
	spin_lock_init(&fl->lock);
	mutex_init(&fl->mutex);
	INIT_LIST_HEAD(&fl->pending);
	INIT_LIST_HEAD(&fl->maps);
	INIT_LIST_HEAD(&fl->user);
	fl->tgid = current->tgid;
	fl->cctx = cctx;

	fl->sctx = fastrpc_session_alloc(cctx);
	if (!fl->sctx) {
		dev_err(&cctx->rpdev->dev, "No session available\n");
		mutex_destroy(&fl->mutex);
		fastrpc_channel_ctx_put(cctx);
		filp->private_data = NULL;
		kfree(fl);

		return -EBUSY;
	}

	spin_lock_irqsave(&cctx->lock, flags);
	list_add_tail(&fl->user, &cctx->users);
	spin_unlock_irqrestore(&cctx->lock, flags);

	return 0;
}
static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_alloc_dma_buf bp;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct fastrpc_buf *buf = NULL;
	int err;

	if (copy_from_user(&bp, argp, sizeof(bp)))
		return -EFAULT;

	err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
	if (err)
		return err;
	exp_info.ops = &fastrpc_dma_buf_ops;
	exp_info.size = bp.size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	buf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(buf->dmabuf)) {
		err = PTR_ERR(buf->dmabuf);
		fastrpc_buf_free(buf);
		return err;
	}

	bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
	if (bp.fd < 0) {
		dma_buf_put(buf->dmabuf);
		return -EINVAL;
	}

	if (copy_to_user(argp, &bp, sizeof(bp))) {
		dma_buf_put(buf->dmabuf);
		return -EFAULT;
	}

	return 0;
}
static int fastrpc_init_attach(struct fastrpc_user *fl)
{
	struct fastrpc_invoke_args args[1];
	int tgid = fl->tgid;
	u32 sc;

	args[0].ptr = (u64)(uintptr_t) &tgid;
	args[0].length = sizeof(tgid);
	args[0].fd = -1;
	args[0].reserved = 0;
	sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
	fl->pd = 0;

	return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
				       sc, &args[0]);
}
static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
	struct fastrpc_invoke_args *args = NULL;
	struct fastrpc_invoke inv;
	u32 nscalars;
	int err;

	if (copy_from_user(&inv, argp, sizeof(inv)))
		return -EFAULT;
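
	/* nscalars is truncated here to max supported value */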
	nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
	if (nscalars) {
		args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
		if (!args)
			return -ENOMEM;

		if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
				   nscalars * sizeof(*args))) {
			kfree(args);
			return -EFAULT;
		}
	}

	err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
	kfree(args);

	return err;
}
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
	char __user *argp = (char __user *)arg;
	int err;

	switch (cmd) {
	case FASTRPC_IOCTL_INVOKE:
		err = fastrpc_invoke(fl, argp);
		break;
	case FASTRPC_IOCTL_INIT_ATTACH:
		err = fastrpc_init_attach(fl);
		break;
	case FASTRPC_IOCTL_INIT_CREATE:
		err = fastrpc_init_create_process(fl, argp);
		break;
	case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
		err = fastrpc_dmabuf_alloc(fl, argp);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

static const struct file_operations fastrpc_fops = {
	.open = fastrpc_device_open,
	.release = fastrpc_device_release,
	.unlocked_ioctl = fastrpc_device_ioctl,
	.compat_ioctl = fastrpc_device_ioctl,
};
static int fastrpc_cb_probe(struct platform_device *pdev)
{
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_session_ctx *sess;
	struct device *dev = &pdev->dev;
	int i, sessions = 0;
	unsigned long flags;
	int rc;

	cctx = dev_get_drvdata(dev->parent);
	if (!cctx)
		return -EINVAL;

	of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);

	spin_lock_irqsave(&cctx->lock, flags);
	sess = &cctx->session[cctx->sesscount];
	sess->used = false;
	sess->valid = true;
	sess->dev = dev;
	dev_set_drvdata(dev, sess);

	if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
		dev_info(dev, "FastRPC Session ID not specified in DT\n");

	if (sessions > 0) {
		struct fastrpc_session_ctx *dup_sess;

		for (i = 1; i < sessions; i++) {
			if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
				break;
			dup_sess = &cctx->session[cctx->sesscount];
			memcpy(dup_sess, sess, sizeof(*dup_sess));
		}
	}
	cctx->sesscount++;
	spin_unlock_irqrestore(&cctx->lock, flags);
	rc = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "32-bit DMA enable failed\n");
		return rc;
	}

	return 0;
}
1368
1369 static int fastrpc_cb_remove(struct platform_device *pdev)
1370 {
1371 struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
1372 struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
1373 unsigned long flags;
1374 int i;
1375
1376 spin_lock_irqsave(&cctx->lock, flags);
1377 for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
1378 if (cctx->session[i].sid == sess->sid) {
1379 cctx->session[i].valid = false;
1380 cctx->sesscount--;
1381 }
1382 }
1383 spin_unlock_irqrestore(&cctx->lock, flags);
1384
1385 return 0;
1386 }
1387
static const struct of_device_id fastrpc_match_table[] = {
	{ .compatible = "qcom,fastrpc-compute-cb", },
	{}
};

static struct platform_driver fastrpc_cb_driver = {
	.probe = fastrpc_cb_probe,
	.remove = fastrpc_cb_remove,
	.driver = {
		.name = "qcom,fastrpc-cb",
		.of_match_table = fastrpc_match_table,
		.suppress_bind_attrs = true,
	},
};
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1;
	const char *domain;

	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->miscdev.minor = MISC_DYNAMIC_MINOR;
	data->miscdev.name = devm_kasprintf(rdev, GFP_KERNEL, "fastrpc-%s",
					    domains[domain_id]);
	data->miscdev.fops = &fastrpc_fops;
	err = misc_register(&data->miscdev);
	if (err) {
		kfree(data);
		return err;
	}

	kref_init(&data->refcount);

	dev_set_drvdata(&rpdev->dev, data);
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	spin_lock_init(&data->lock);
	idr_init(&data->ctx_idr);
	data->domain_id = domain_id;
	data->rpdev = rpdev;

	return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
}
static void fastrpc_notify_users(struct fastrpc_user *user)
{
	struct fastrpc_invoke_ctx *ctx;

	spin_lock(&user->lock);
	list_for_each_entry(ctx, &user->pending, node)
		complete(&ctx->work);
	spin_unlock(&user->lock);
}
static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_user *user;
	unsigned long flags;

	spin_lock_irqsave(&cctx->lock, flags);
	list_for_each_entry(user, &cctx->users, user)
		fastrpc_notify_users(user);
	spin_unlock_irqrestore(&cctx->lock, flags);

	misc_deregister(&cctx->miscdev);
	of_platform_depopulate(&rpdev->dev);

	cctx->rpdev = NULL;
	fastrpc_channel_ctx_put(cctx);
}
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
				  int len, void *priv, u32 addr)
{
	struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
	struct fastrpc_invoke_rsp *rsp = data;
	struct fastrpc_invoke_ctx *ctx;
	unsigned long flags;
	unsigned long ctxid;

	if (len < sizeof(*rsp))
		return -EINVAL;

	ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);

	spin_lock_irqsave(&cctx->lock, flags);
	ctx = idr_find(&cctx->ctx_idr, ctxid);
	spin_unlock_irqrestore(&cctx->lock, flags);

	if (!ctx) {
		dev_err(&rpdev->dev, "No context ID matches response\n");
		return -ENOENT;
	}

	ctx->retval = rsp->retval;
	complete(&ctx->work);
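
	/*
	 * The DMA buffer associated with the context cannot be freed in
	 * interrupt context so schedule it through a worker thread to
	 * avoid a kernel BUG.
	 */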
	schedule_work(&ctx->put_work);

	return 0;
}
static const struct of_device_id fastrpc_rpmsg_of_match[] = {
	{ .compatible = "qcom,fastrpc" },
	{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);

static struct rpmsg_driver fastrpc_driver = {
	.probe = fastrpc_rpmsg_probe,
	.remove = fastrpc_rpmsg_remove,
	.callback = fastrpc_rpmsg_callback,
	.drv = {
		.name = "qcom,fastrpc",
		.of_match_table = fastrpc_rpmsg_of_match,
	},
};

static int fastrpc_init(void)
{
	int ret;

	ret = platform_driver_register(&fastrpc_cb_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register cb driver\n");
		return ret;
	}

	ret = register_rpmsg_driver(&fastrpc_driver);
	if (ret < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		platform_driver_unregister(&fastrpc_cb_driver);
		return ret;
	}

	return 0;
}
module_init(fastrpc_init);
static void fastrpc_exit(void)
{
	unregister_rpmsg_driver(&fastrpc_driver);
	platform_driver_unregister(&fastrpc_cb_driver);
}
module_exit(fastrpc_exit);

MODULE_LICENSE("GPL v2");