Lines matching references to node

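All of the hits below fall in the gk20a instance-memory backend of the nouveau driver (nvkm/subdev/instmem/gk20a.c), grouped by function; node is the local pointer to the instance object each function operates on. Each group is followed by a short, hedged reconstruction of the surrounding function so the fragments can be read in context; any structure layout, lock, or helper name not visible in the hits themselves is an assumption.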
137 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); in gk20a_instobj_cpu_map_dma() local
138 struct device *dev = node->base.imem->base.subdev.device->dev; in gk20a_instobj_cpu_map_dma()
145 pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT); in gk20a_instobj_cpu_map_dma()
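Lines 137-145 are the core of the DMA variant's CPU mapping: the buffer is physically contiguous, so the driver recovers the first struct page from the DMA handle and vmap()s the whole run write-combined. A minimal sketch of the full function under those assumptions; note that dma_to_phys() is not a portable API (it exists on the Tegra platforms this code runs on), and PAGE_SHIFT is 12 here:

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

static void __iomem *
gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct device *dev = node->base.imem->base.subdev.device->dev;
	int npages = nvkm_memory_size(memory) >> PAGE_SHIFT;
	struct page *pages[npages];	/* small VLA, one entry per page */
	int i;

	/* recover the first page from the DMA handle... */
	pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
	/* ...the buffer is contiguous, so the rest follow directly */
	for (i = 1; i < npages; i++)
		pages[i] = pages[0] + i;

	return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}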
159 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); in gk20a_instobj_cpu_map_iommu() local
162 return vmap(node->pages, npages, VM_MAP, in gk20a_instobj_cpu_map_iommu()
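The IOMMU variant already tracks its backing pages in an array, so its CPU mapping is a single vmap() call. Sketch, assuming only the struct gk20a_instobj_iommu layout implied by the hits:

static void __iomem *
gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	int npages = nvkm_memory_size(memory) >> PAGE_SHIFT;

	return vmap(node->pages, npages, VM_MAP,
		    pgprot_writecombine(PAGE_KERNEL));
}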
194 struct gk20a_instobj *node = gk20a_instobj(memory); in gk20a_instobj_acquire() local
195 struct gk20a_instmem *imem = node->imem; in gk20a_instobj_acquire()
204 if (node->vaddr) { in gk20a_instobj_acquire()
206 list_del(&node->vaddr_node); in gk20a_instobj_acquire()
214 node->vaddr = imem->cpu_map(memory); in gk20a_instobj_acquire()
216 if (!node->vaddr) { in gk20a_instobj_acquire()
229 return node->vaddr; in gk20a_instobj_acquire()
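acquire() implements a small LRU cache of CPU mappings: if the object is still mapped it is simply unlinked from the LRU and reused; otherwise the backend cpu_map() hook is called, evicting older mappings first when an address-space budget is enforced. A sketch under those assumptions; imem->lock, the vaddr_use accounting, and the gk20a_instmem_vaddr_gc() eviction helper are assumed names, not shown in the hits:

static void __iomem *
gk20a_instobj_acquire(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	const u64 size = nvkm_memory_size(memory);
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);

	if (node->vaddr) {
		/* mapping still cached - just pull it off the LRU */
		list_del(&node->vaddr_node);
		goto out;
	}

	/* assumed helper: evict LRU mappings until 'size' fits the budget */
	gk20a_instmem_vaddr_gc(imem, size);

	node->vaddr = imem->cpu_map(memory);
	if (!node->vaddr) {
		nvkm_error(&imem->base.subdev, "cannot map instobj\n");
		goto out;
	}

	imem->vaddr_use += size;

out:
	spin_unlock_irqrestore(&imem->lock, flags);
	return node->vaddr;
}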
235 struct gk20a_instobj *node = gk20a_instobj(memory); in gk20a_instobj_release() local
236 struct gk20a_instmem *imem = node->imem; in gk20a_instobj_release()
243 list_add_tail(&node->vaddr_node, &imem->vaddr_lru); in gk20a_instobj_release()
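release() deliberately does not vunmap(): the mapping is parked on the LRU tail so the next acquire() can reuse it for free, and it is only torn down under address-space pressure or at destruction. Sketch, with the same assumed lock:

static void
gk20a_instobj_release(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);
	/* park on the LRU tail: we are the most recent user */
	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
	spin_unlock_irqrestore(&imem->lock, flags);
}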
254 struct gk20a_instobj *node = gk20a_instobj(memory); in gk20a_instobj_rd32() local
256 return node->vaddr[offset / 4]; in gk20a_instobj_rd32()
262 struct gk20a_instobj *node = gk20a_instobj(memory); in gk20a_instobj_wr32() local
264 node->vaddr[offset / 4] = data; in gk20a_instobj_wr32()
270 struct gk20a_instobj *node = gk20a_instobj(memory); in gk20a_instobj_map() local
272 nvkm_vm_map_at(vma, offset, &node->mem); in gk20a_instobj_map()
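Because acquire() guarantees a live mapping, the accessors collapse to direct word indexing on vaddr (a u32 pointer, hence offset / 4), and map() just hands the backing nvkm_mem to the VM layer. These three follow almost verbatim from the hits:

static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	return node->vaddr[offset / 4];	/* byte offset into a u32 array */
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	node->vaddr[offset / 4] = data;
}

static void
gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	nvkm_vm_map_at(vma, offset, &node->mem);
}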
279 gk20a_instobj_dtor(struct gk20a_instobj *node) in gk20a_instobj_dtor() argument
281 struct gk20a_instmem *imem = node->imem; in gk20a_instobj_dtor()
287 if (!node->vaddr) in gk20a_instobj_dtor()
291 if (obj == node) { in gk20a_instobj_dtor()
296 vunmap(node->vaddr); in gk20a_instobj_dtor()
297 node->vaddr = NULL; in gk20a_instobj_dtor()
298 imem->vaddr_use -= nvkm_memory_size(&node->memory); in gk20a_instobj_dtor()
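The common destructor drops any cached mapping: the comparison at line 291 is part of a walk over the LRU that unlinks the object if it is still queued, after which the mapping is vunmap()ed and the usage counter adjusted. Sketch, again assuming imem->lock and the vaddr_use field:

static void
gk20a_instobj_dtor(struct gk20a_instobj *node)
{
	struct gk20a_instmem *imem = node->imem;
	struct gk20a_instobj *obj;
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);

	if (!node->vaddr)
		goto out;

	/* unlink ourselves from the LRU if we are still on it */
	list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) {
		if (obj == node) {
			list_del(&obj->vaddr_node);
			break;
		}
	}
	vunmap(node->vaddr);
	node->vaddr = NULL;
	imem->vaddr_use -= nvkm_memory_size(&node->memory);

out:
	spin_unlock_irqrestore(&imem->lock, flags);
}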
309 struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory); in gk20a_instobj_dtor_dma() local
310 struct gk20a_instmem *imem = node->base.imem; in gk20a_instobj_dtor_dma()
313 gk20a_instobj_dtor(&node->base); in gk20a_instobj_dtor_dma()
315 if (unlikely(!node->cpuaddr)) in gk20a_instobj_dtor_dma()
318 dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr, in gk20a_instobj_dtor_dma()
319 node->handle, &imem->attrs); in gk20a_instobj_dtor_dma()
322 return node; in gk20a_instobj_dtor_dma()
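The DMA destructor layers on the common one and frees the contiguous buffer; the cpuaddr check at line 315 covers a constructor that failed before allocating. Returning node (line 322) hands the object back to the nvkm_memory core, which kfree()s it. Sketch; note dma_free_attrs() took a struct dma_attrs * in kernels of this vintage and takes an unsigned long attrs value in later ones:

static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;

	gk20a_instobj_dtor(&node->base);

	if (unlikely(!node->cpuaddr))	/* ctor failed before allocating */
		goto out;

	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr,
		       node->handle, &imem->attrs);

out:
	return node;
}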
328 struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory); in gk20a_instobj_dtor_iommu() local
329 struct gk20a_instmem *imem = node->base.imem; in gk20a_instobj_dtor_iommu()
334 gk20a_instobj_dtor(&node->base); in gk20a_instobj_dtor_iommu()
336 if (unlikely(list_empty(&node->base.mem.regions))) in gk20a_instobj_dtor_iommu()
339 r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node, in gk20a_instobj_dtor_iommu()
346 for (i = 0; i < node->base.mem.size; i++) { in gk20a_instobj_dtor_iommu()
349 dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE, in gk20a_instobj_dtor_iommu()
351 __free_page(node->pages[i]); in gk20a_instobj_dtor_iommu()
360 return node; in gk20a_instobj_dtor_iommu()
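IOMMU teardown mirrors its constructor: recover the nvkm_mm_node describing the reserved IOMMU range, unmap each page from the domain, then DMA-unmap and free the page itself, and finally return the range to the allocator. Sketch; the imem->mm allocator and its mm_mutex are assumed names, and the iommu_unmap() step (not among the hits) is reconstructed:

static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r;
	int i;

	gk20a_instobj_dtor(&node->base);

	if (unlikely(list_empty(&node->base.mem.regions)))
		goto out;	/* ctor failed before reserving the range */

	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
			     rl_entry);

	/* unmap IOMMU space, then release the pages backing it */
	for (i = 0; i < node->base.mem.size; i++) {
		iommu_unmap(imem->domain,
			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	/* return the IOMMU address range to the shared allocator */
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

out:
	return node;
}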
393 struct gk20a_instobj_dma *node; in gk20a_instobj_ctor_dma() local
397 if (!(node = kzalloc(sizeof(*node), GFP_KERNEL))) in gk20a_instobj_ctor_dma()
399 *_node = &node->base; in gk20a_instobj_ctor_dma()
401 nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory); in gk20a_instobj_ctor_dma()
403 node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, in gk20a_instobj_ctor_dma()
404 &node->handle, GFP_KERNEL, in gk20a_instobj_ctor_dma()
406 if (!node->cpuaddr) { in gk20a_instobj_ctor_dma()
412 if (unlikely(node->handle & (align - 1))) in gk20a_instobj_ctor_dma()
415 &node->handle, align); in gk20a_instobj_ctor_dma()
418 node->r.type = 12; in gk20a_instobj_ctor_dma()
419 node->r.offset = node->handle >> 12; in gk20a_instobj_ctor_dma()
420 node->r.length = (npages << PAGE_SHIFT) >> 12; in gk20a_instobj_ctor_dma()
422 node->base.mem.offset = node->handle; in gk20a_instobj_ctor_dma()
424 INIT_LIST_HEAD(&node->base.mem.regions); in gk20a_instobj_ctor_dma()
425 list_add_tail(&node->r.rl_entry, &node->base.mem.regions); in gk20a_instobj_ctor_dma()
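The DMA constructor allocates one contiguous buffer and then forges a fake nvkm_mm_node (node->r) describing it, so the generic nouveau mapping code, which expects a list of memory regions, can consume it; type 12 and the >> 12 shifts reflect nouveau's 4 KiB page bookkeeping. Sketch, again with the pre-4.8 dma_attrs form:

static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{
	struct gk20a_instobj_dma *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);

	node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					&node->handle, GFP_KERNEL,
					&imem->attrs);
	if (!node->cpuaddr) {
		nvkm_error(subdev, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check */
	if (unlikely(node->handle & (align - 1)))
		nvkm_warn(subdev,
			  "memory not aligned as requested: %pad (0x%x)\n",
			  &node->handle, align);

	/* present memory for being mapped using small pages */
	node->r.type = 12;
	node->r.offset = node->handle >> 12;
	node->r.length = (npages << PAGE_SHIFT) >> 12;

	node->base.mem.offset = node->handle;

	INIT_LIST_HEAD(&node->base.mem.regions);
	list_add_tail(&node->r.rl_entry, &node->base.mem.regions);

	return 0;
}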
434 struct gk20a_instobj_iommu *node; in gk20a_instobj_ctor_iommu() local
445 if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) + in gk20a_instobj_ctor_iommu()
446 sizeof(*node->dma_addrs)) * npages), GFP_KERNEL))) in gk20a_instobj_ctor_iommu()
448 *_node = &node->base; in gk20a_instobj_ctor_iommu()
449 node->dma_addrs = (void *)(node->pages + npages); in gk20a_instobj_ctor_iommu()
451 nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory); in gk20a_instobj_ctor_iommu()
462 node->pages[i] = p; in gk20a_instobj_ctor_iommu()
469 node->dma_addrs[i] = dma_adr; in gk20a_instobj_ctor_iommu()
486 ret = iommu_map(imem->domain, offset, node->dma_addrs[i], in gk20a_instobj_ctor_iommu()
502 node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift; in gk20a_instobj_ctor_iommu()
504 INIT_LIST_HEAD(&node->base.mem.regions); in gk20a_instobj_ctor_iommu()
505 list_add_tail(&r->rl_entry, &node->base.mem.regions); in gk20a_instobj_ctor_iommu()
515 for (i = 0; i < npages && node->pages[i] != NULL; i++) { in gk20a_instobj_ctor_iommu()
516 dma_addr_t dma_addr = node->dma_addrs[i]; in gk20a_instobj_ctor_iommu()
520 __free_page(node->pages[i]); in gk20a_instobj_ctor_iommu()
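The IOMMU constructor does the same job page by page: a single kzalloc() carries the object plus its pages[] and dma_addrs[] arrays (lines 445-449), each page is allocated and DMA-mapped, a contiguous range of IOMMU address space is reserved, and every page is iommu_map()ed into it; lines 515-520 are the unwind path that frees whatever was built. Sketch with the allocator names assumed as before and the error unwinding only partially reconstructed:

static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{
	struct gk20a_instobj_iommu *node;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r;
	int ret, i;

	/* one allocation for the object plus its two per-page arrays */
	if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
			     sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);

	/* allocate and DMA-map each backing page individually */
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		dma_addr_t dma_adr;

		ret = -ENOMEM;
		if (!p)
			goto free_pages;
		node->pages[i] = p;
		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_adr))
			goto free_pages;
		node->dma_addrs[i] = dma_adr;
	}

	/* reserve a contiguous range of IOMMU address space... */
	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
			   align >> imem->iommu_pgshift, &r);
	if (ret)
		goto free_pages;

	/* ...and map every page into it */
	for (i = 0; i < npages; i++) {
		u32 offset = (r->offset + i) << imem->iommu_pgshift;

		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret < 0)
			goto release_area;
	}

	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;

	INIT_LIST_HEAD(&node->base.mem.regions);
	list_add_tail(&r->rl_entry, &node->base.mem.regions);

	return 0;

release_area:
	/* full unwinding (iommu_unmap of mapped pages, nvkm_mm_free of r)
	 * elided in this sketch */
free_pages:
	for (i = 0; i < npages && node->pages[i] != NULL; i++) {
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}
	return ret;
}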
532 struct gk20a_instobj *node = NULL; in gk20a_instobj_new() local
544 align, &node); in gk20a_instobj_new()
547 align, &node); in gk20a_instobj_new()
548 *pmemory = node ? &node->memory : NULL; in gk20a_instobj_new()
552 node->imem = imem; in gk20a_instobj_new()
555 node->mem.size = size >> 12; in gk20a_instobj_new()
556 node->mem.memtype = 0; in gk20a_instobj_new()
557 node->mem.page_shift = 12; in gk20a_instobj_new()
560 size, align, node->mem.offset); in gk20a_instobj_new()
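The top-level constructor selects a backend at runtime: with an IOMMU domain available it takes the per-page path, otherwise the contiguous DMA path, then fills in the nvkm_mem fields shared by both; nouveau bookkeeping is in 4 KiB units, hence size >> 12 and page_shift = 12. Sketch, assuming the gk20a_instmem() container cast and the usual page rounding of size and align:

static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);
	struct gk20a_instobj *node = NULL;
	int ret;

	/* round size and align up to page bounds (assumed) */
	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

	if (imem->domain)
		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
					       align, &node);
	else
		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
					     align, &node);
	*pmemory = node ? &node->memory : NULL;
	if (ret)
		return ret;

	node->imem = imem;

	/* present memory for being mapped using small pages */
	node->mem.size = size >> 12;
	node->mem.memtype = 0;
	node->mem.page_shift = 12;

	nvkm_debug(&imem->base.subdev,
		   "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
		   size, align, node->mem.offset);

	return 0;
}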