Lines matching refs: imem

55 	struct gk20a_instmem *imem;  member
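
Taken together, the fields referenced throughout this listing suggest roughly the following layout. This is a reconstruction for orientation only; the exact types, ordering, and any fields not referenced above are assumptions.

struct gk20a_instobj {
	struct nvkm_memory memory;
	struct nvkm_mem mem;
	struct gk20a_instmem *imem;	/* back-pointer, line 55 above */
	/* CPU mapping state, protected by imem->lock */
	void *vaddr;
	struct list_head vaddr_node;
};

struct gk20a_instmem {
	struct nvkm_instmem base;

	/* protects vaddr_use/vaddr_max and the LRU below */
	spinlock_t lock;

	/* CPU mapping budget and LRU of mappings that may be reclaimed */
	u32 vaddr_use;
	u32 vaddr_max;
	struct list_head vaddr_lru;

	/* IOMMU backend */
	struct mutex *mm_mutex;
	struct nvkm_mm *mm;
	struct iommu_domain *domain;
	unsigned long iommu_pgshift;
	u16 iommu_bit;

	/* DMA API backend */
	struct dma_attrs attrs;

	/* backend-specific CPU mapping helper */
	void *(*cpu_map)(struct nvkm_memory *);
};
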
138 struct device *dev = node->base.imem->base.subdev.device->dev; in gk20a_instobj_cpu_map_dma()
170 gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size) in gk20a_instmem_vaddr_gc() argument
172 while (imem->vaddr_use + size > imem->vaddr_max) { in gk20a_instmem_vaddr_gc()
176 if (list_empty(&imem->vaddr_lru)) in gk20a_instmem_vaddr_gc()
179 obj = list_first_entry(&imem->vaddr_lru, struct gk20a_instobj, in gk20a_instmem_vaddr_gc()
184 imem->vaddr_use -= nvkm_memory_size(&obj->memory); in gk20a_instmem_vaddr_gc()
185 nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n", in gk20a_instmem_vaddr_gc()
186 imem->vaddr_use, imem->vaddr_max); in gk20a_instmem_vaddr_gc()
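
The matched lines from gk20a_instmem_vaddr_gc() outline an LRU eviction loop: mappings are dropped from the head of vaddr_lru until the requested size fits within the CPU-mapping budget. A sketch of the whole loop follows; the list_del()/vunmap() teardown is an assumption, as those calls are not part of the listing.

static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
	while (imem->vaddr_use + size > imem->vaddr_max) {
		struct gk20a_instobj *obj;

		/* nothing left on the LRU that we could unmap */
		if (list_empty(&imem->vaddr_lru))
			break;

		obj = list_first_entry(&imem->vaddr_lru, struct gk20a_instobj,
				       vaddr_node);
		list_del(&obj->vaddr_node);
		vunmap(obj->vaddr);
		obj->vaddr = NULL;
		imem->vaddr_use -= nvkm_memory_size(&obj->memory);
		nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n",
			   imem->vaddr_use, imem->vaddr_max);
	}
}
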
195 struct gk20a_instmem *imem = node->imem; in gk20a_instobj_acquire() local
196 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; in gk20a_instobj_acquire()
202 spin_lock_irqsave(&imem->lock, flags); in gk20a_instobj_acquire()
212 gk20a_instmem_vaddr_gc(imem, size); in gk20a_instobj_acquire()
214 node->vaddr = imem->cpu_map(memory); in gk20a_instobj_acquire()
217 nvkm_error(&imem->base.subdev, "cannot map instobj - " in gk20a_instobj_acquire()
222 imem->vaddr_use += size; in gk20a_instobj_acquire()
223 nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", in gk20a_instobj_acquire()
224 imem->vaddr_use, imem->vaddr_max); in gk20a_instobj_acquire()
227 spin_unlock_irqrestore(&imem->lock, flags); in gk20a_instobj_acquire()
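
The gk20a_instobj_acquire() lines show the consumer side of that budget: take the lock, evict LRU entries via the garbage collector, map through the backend's cpu_map hook, then account the new mapping. The sketch below fills in the early-exit for already-mapped objects and the LTC flush; those parts, and the tail of the error message, are assumptions not visible in the listing.

static void __iomem *
gk20a_instobj_acquire(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	const u64 size = nvkm_memory_size(memory);
	unsigned long flags;

	nvkm_ltc_flush(ltc);

	spin_lock_irqsave(&imem->lock, flags);

	if (node->vaddr) {
		/* already mapped: take it off the reclaim LRU while in use */
		list_del(&node->vaddr_node);
		goto out;
	}

	/* evict LRU mappings until this object fits in the vaddr budget */
	gk20a_instmem_vaddr_gc(imem, size);

	node->vaddr = imem->cpu_map(memory);
	if (!node->vaddr) {
		/* full message text is truncated in the listing */
		nvkm_error(&imem->base.subdev, "cannot map instobj - ...\n");
		goto out;
	}

	imem->vaddr_use += size;
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	spin_unlock_irqrestore(&imem->lock, flags);
	return node->vaddr;
}
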
236 struct gk20a_instmem *imem = node->imem; in gk20a_instobj_release() local
237 struct nvkm_ltc *ltc = imem->base.subdev.device->ltc; in gk20a_instobj_release()
240 spin_lock_irqsave(&imem->lock, flags); in gk20a_instobj_release()
243 list_add_tail(&node->vaddr_node, &imem->vaddr_lru); in gk20a_instobj_release()
245 spin_unlock_irqrestore(&imem->lock, flags); in gk20a_instobj_release()
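
gk20a_instobj_release() is the counterpart: the mapping is not torn down, it is merely parked at the tail of vaddr_lru so the garbage collector can reclaim it later if the budget runs out. A sketch, assuming the trailing write barrier and LTC invalidate which are not shown in the listing:

static void
gk20a_instobj_release(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);

	/* keep the CPU mapping, but make it reclaimable by the GC */
	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);

	spin_unlock_irqrestore(&imem->lock, flags);

	wmb();
	nvkm_ltc_invalidate(ltc);
}
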
281 struct gk20a_instmem *imem = node->imem; in gk20a_instobj_dtor() local
285 spin_lock_irqsave(&imem->lock, flags); in gk20a_instobj_dtor()
290 list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) { in gk20a_instobj_dtor()
298 imem->vaddr_use -= nvkm_memory_size(&node->memory); in gk20a_instobj_dtor()
299 nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", in gk20a_instobj_dtor()
300 imem->vaddr_use, imem->vaddr_max); in gk20a_instobj_dtor()
303 spin_unlock_irqrestore(&imem->lock, flags); in gk20a_instobj_dtor()
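
The common destructor walks the LRU to unlink the object if its mapping is still parked there, then gives the mapped bytes back to the budget. Sketch; the vunmap() and the early exit for never-mapped objects are assumptions.

static void
gk20a_instobj_dtor(struct gk20a_instobj *node)
{
	struct gk20a_instmem *imem = node->imem;
	struct gk20a_instobj *obj;
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);

	if (!node->vaddr)
		goto out;

	/* if the mapping is still parked on the LRU, unlink it first */
	list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) {
		if (obj == node) {
			list_del(&obj->vaddr_node);
			break;
		}
	}

	vunmap(node->vaddr);
	node->vaddr = NULL;
	imem->vaddr_use -= nvkm_memory_size(&node->memory);
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	spin_unlock_irqrestore(&imem->lock, flags);
}
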
310 struct gk20a_instmem *imem = node->base.imem; in gk20a_instobj_dtor_dma() local
311 struct device *dev = imem->base.subdev.device->dev; in gk20a_instobj_dtor_dma()
319 node->handle, &imem->attrs); in gk20a_instobj_dtor_dma()
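
For the DMA backend, the destructor frees the single contiguous allocation with the same attrs prepared at instmem creation. In this sketch, node->handle and &imem->attrs come from line 319 above; the cpuaddr field name and the size expression are assumptions.

static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;

	gk20a_instobj_dtor(&node->base);

	if (!node->cpuaddr)	/* allocation never succeeded */
		return node;

	/* return the pages to the DMA API, using the allocation attrs */
	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr,
		       node->handle, &imem->attrs);

	return node;
}
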
329 struct gk20a_instmem *imem = node->base.imem; in gk20a_instobj_dtor_iommu() local
330 struct device *dev = imem->base.subdev.device->dev; in gk20a_instobj_dtor_iommu()
343 r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift); in gk20a_instobj_dtor_iommu()
347 iommu_unmap(imem->domain, in gk20a_instobj_dtor_iommu()
348 (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE); in gk20a_instobj_dtor_iommu()
355 mutex_lock(imem->mm_mutex); in gk20a_instobj_dtor_iommu()
356 nvkm_mm_free(imem->mm, &r); in gk20a_instobj_dtor_iommu()
357 mutex_unlock(imem->mm_mutex); in gk20a_instobj_dtor_iommu()
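
The IOMMU destructor first clears the translation bit to recover the raw offset, then unmaps each page from the domain and releases the reserved range back to the shared allocator under mm_mutex. Sketch; the dma_unmap_page()/__free_page() teardown and the assumption that mem.size counts pages are not attested by the listing.

static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r;
	int i;

	gk20a_instobj_dtor(&node->base);

	if (list_empty(&node->base.mem.regions))
		return node;

	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
			     rl_entry);

	/* clear the IOMMU translation bit to get the real offset back */
	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

	/* unmap and release the backing pages */
	for (i = 0; i < node->base.mem.size; i++) {
		iommu_unmap(imem->domain,
			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	/* release the reserved IOMMU address range */
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

	return node;
}
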
390 gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align, in gk20a_instobj_ctor_dma() argument
394 struct nvkm_subdev *subdev = &imem->base.subdev; in gk20a_instobj_ctor_dma()
405 &imem->attrs); in gk20a_instobj_ctor_dma()
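
The DMA constructor lines only show the subdev lookup and the attrs argument at line 405; they imply a single dma_alloc_attrs() call per object. A minimal sketch, with the cpuaddr/handle field names, error message, and omitted bookkeeping all being assumptions:

static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{
	struct gk20a_instobj_dma *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	/* one contiguous allocation through the DMA API, with the attrs
	 * prepared in gk20a_instmem_new() */
	node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					&node->handle, GFP_KERNEL,
					&imem->attrs);
	if (!node->cpuaddr) {
		nvkm_error(subdev, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check and nvkm_mem bookkeeping omitted */
	return 0;
}
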
431 gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align, in gk20a_instobj_ctor_iommu() argument
435 struct nvkm_subdev *subdev = &imem->base.subdev; in gk20a_instobj_ctor_iommu()
472 mutex_lock(imem->mm_mutex); in gk20a_instobj_ctor_iommu()
474 ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages, in gk20a_instobj_ctor_iommu()
475 align >> imem->iommu_pgshift, &r); in gk20a_instobj_ctor_iommu()
476 mutex_unlock(imem->mm_mutex); in gk20a_instobj_ctor_iommu()
484 u32 offset = (r->offset + i) << imem->iommu_pgshift; in gk20a_instobj_ctor_iommu()
486 ret = iommu_map(imem->domain, offset, node->dma_addrs[i], in gk20a_instobj_ctor_iommu()
493 iommu_unmap(imem->domain, offset, PAGE_SIZE); in gk20a_instobj_ctor_iommu()
500 r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift); in gk20a_instobj_ctor_iommu()
502 node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift; in gk20a_instobj_ctor_iommu()
510 mutex_lock(imem->mm_mutex); in gk20a_instobj_ctor_iommu()
511 nvkm_mm_free(imem->mm, &r); in gk20a_instobj_ctor_iommu()
512 mutex_unlock(imem->mm_mutex); in gk20a_instobj_ctor_iommu()
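
The IOMMU constructor lines show the interesting part of the backend: reserve a contiguous range of IOMMU addresses from the shared allocator, map one page at a time into it (unwinding on failure), then set the platform's translation bit so the GPU address is recognised as IOMMU-backed. Sketch below; the page/DMA-handle setup, prot flags, and error strings are assumptions.

static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{
	struct gk20a_instobj_iommu *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct nvkm_mm_node *r;
	int ret;
	int i;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	/* allocation of the backing pages and their dma_map_page() handles
	 * in node->dma_addrs[] omitted here */

	/* reserve a contiguous range of IOMMU addresses (in IOMMU pages) */
	mutex_lock(imem->mm_mutex);
	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
			   align >> imem->iommu_pgshift, &r);
	mutex_unlock(imem->mm_mutex);
	if (ret) {
		nvkm_error(subdev, "IOMMU space is full!\n");
		goto free_pages;
	}

	/* map the backing pages into the reserved range */
	for (i = 0; i < npages; i++) {
		u32 offset = (r->offset + i) << imem->iommu_pgshift;

		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret < 0) {
			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
			/* unwind the mappings done so far */
			while (i-- > 0) {
				offset -= PAGE_SIZE;
				iommu_unmap(imem->domain, offset, PAGE_SIZE);
			}
			goto release_area;
		}
	}

	/* flag the address as IOMMU-translated via the dedicated bit */
	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
	return 0;

release_area:
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);
free_pages:
	/* page and DMA-handle unwinding omitted */
	return ret;
}
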
530 struct gk20a_instmem *imem = gk20a_instmem(base); in gk20a_instobj_new() local
531 struct nvkm_subdev *subdev = &imem->base.subdev; in gk20a_instobj_new()
536 imem->domain ? "IOMMU" : "DMA", size, align); in gk20a_instobj_new()
542 if (imem->domain) in gk20a_instobj_new()
543 ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT, in gk20a_instobj_new()
546 ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT, in gk20a_instobj_new()
552 node->imem = imem; in gk20a_instobj_new()
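
gk20a_instobj_new() simply dispatches between the two constructors depending on whether an IOMMU domain was found at probe time, then stores the back-pointer seen at line 55. Sketch; the exact signature, size rounding, and nvkm_mem bookkeeping are assumptions.

static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct gk20a_instobj *node = NULL;
	int ret;

	/* the original logs the chosen backend ("IOMMU" or "DMA"), size and
	 * align here via nvkm_debug(); format string not shown in listing */

	if (imem->domain)
		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
					       align, &node);
	else
		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
					     align, &node);
	*pmemory = node ? &node->memory : NULL;
	if (ret)
		return ret;

	node->imem = imem;

	/* nvkm_mem page-size/memtype setup omitted */
	return 0;
}
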
568 struct gk20a_instmem *imem = gk20a_instmem(base); in gk20a_instmem_dtor() local
571 if (!list_empty(&imem->vaddr_lru)) in gk20a_instmem_dtor()
574 if (imem->vaddr_use != 0) in gk20a_instmem_dtor()
576 "0x%x bytes still mapped\n", imem->vaddr_use); in gk20a_instmem_dtor()
578 return imem; in gk20a_instmem_dtor()
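
The instmem destructor only sanity-checks the CPU-mapping accounting: every instobj should already have been destroyed, so a non-empty LRU or a non-zero vaddr_use indicates a leak. Sketch; the first warning string and the leading part of the second message (elided at line 575) are assumptions.

static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);

	if (!list_empty(&imem->vaddr_lru))
		nvkm_warn(&base->subdev, "instobj LRU not empty!\n");

	if (imem->vaddr_use != 0)
		nvkm_warn(&base->subdev, /* leading part of message elided */
			  "0x%x bytes still mapped\n", imem->vaddr_use);

	return imem;
}
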
594 struct gk20a_instmem *imem; in gk20a_instmem_new() local
596 if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL))) in gk20a_instmem_new()
598 nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base); in gk20a_instmem_new()
599 spin_lock_init(&imem->lock); in gk20a_instmem_new()
600 *pimem = &imem->base; in gk20a_instmem_new()
603 imem->vaddr_use = 0; in gk20a_instmem_new()
604 imem->vaddr_max = 0x100000; in gk20a_instmem_new()
605 INIT_LIST_HEAD(&imem->vaddr_lru); in gk20a_instmem_new()
608 imem->mm_mutex = &tdev->iommu.mutex; in gk20a_instmem_new()
609 imem->mm = &tdev->iommu.mm; in gk20a_instmem_new()
610 imem->domain = tdev->iommu.domain; in gk20a_instmem_new()
611 imem->iommu_pgshift = tdev->iommu.pgshift; in gk20a_instmem_new()
612 imem->cpu_map = gk20a_instobj_cpu_map_iommu; in gk20a_instmem_new()
613 imem->iommu_bit = tdev->func->iommu_bit; in gk20a_instmem_new()
615 nvkm_info(&imem->base.subdev, "using IOMMU\n"); in gk20a_instmem_new()
617 init_dma_attrs(&imem->attrs); in gk20a_instmem_new()
619 dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs); in gk20a_instmem_new()
620 dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs); in gk20a_instmem_new()
621 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs); in gk20a_instmem_new()
622 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs); in gk20a_instmem_new()
623 imem->cpu_map = gk20a_instobj_cpu_map_dma; in gk20a_instmem_new()
625 nvkm_info(&imem->base.subdev, "using DMA API\n"); in gk20a_instmem_new()
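
Finally, gk20a_instmem_new() wires everything together: a 1MB CPU-mapping budget with an empty LRU, then either the IOMMU backend (sharing the Tegra device's allocator, domain, page shift and translation bit) or the DMA backend (non-consistent, weak-ordered, write-combined allocations without a permanent kernel mapping). Sketch; the tdev lookup and the branch condition are assumptions, the rest mirrors the matched lines.

int
gk20a_instmem_new(struct nvkm_device *device, int index,
		  struct nvkm_instmem **pimem)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
	spin_lock_init(&imem->lock);
	*pimem = &imem->base;

	/* do not allow more than 1MB of CPU-mapped instmem at a time */
	imem->vaddr_use = 0;
	imem->vaddr_max = 0x100000;
	INIT_LIST_HEAD(&imem->vaddr_lru);

	if (tdev->iommu.domain) {
		/* IOMMU backend: share the Tegra IOMMU allocator and domain */
		imem->mm_mutex = &tdev->iommu.mutex;
		imem->mm = &tdev->iommu.mm;
		imem->domain = tdev->iommu.domain;
		imem->iommu_pgshift = tdev->iommu.pgshift;
		imem->cpu_map = gk20a_instobj_cpu_map_iommu;
		imem->iommu_bit = tdev->func->iommu_bit;

		nvkm_info(&imem->base.subdev, "using IOMMU\n");
	} else {
		/* DMA backend: non-consistent, write-combined allocations
		 * with no permanent kernel mapping */
		init_dma_attrs(&imem->attrs);
		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
		imem->cpu_map = gk20a_instobj_cpu_map_dma;

		nvkm_info(&imem->base.subdev, "using DMA API\n");
	}

	return 0;
}

DMA_ATTR_NO_KERNEL_MAPPING is what makes the per-object cpu_map hook necessary in the first place: since the DMA API does not keep a kernel mapping, objects are mapped on demand in acquire() and reclaimed through the LRU described above.
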