/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	man->priv = pfb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

/* Unmap and drop any GPU virtual address ranges still attached to a
 * memory node before its backing storage is released.
 */
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	nvkm_mem_node_cleanup(mem->mm_node);
	pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->device.info.ram_size == 0)
		return -ENOMEM;

	/* Non-contiguous objects may be satisfied in chunks of the
	 * BO's page size rather than one linear block.
	 */
	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		/* A full VRAM allocator is not a hard failure: returning
		 * 0 with no mm_node lets TTM evict something and retry.
		 */
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}
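/* Dump the VRAM allocator's node list and running totals to the kernel
 * log.  The mm keeps offsets and lengths in 4 KiB units, hence the << 12
 * when printing byte addresses below.
 */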
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nvkm_fb *pfb = man->priv;
	struct nvkm_mm *mm = &pfb->vram;
	struct nvkm_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&nv_subdev(pfb)->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&nv_subdev(pfb)->mutex);

	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s  block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TESLA:
		/* The original nv50 uses storage type 0; later Tesla
		 * chips take it from the object's tile flags.
		 */
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		break;
	}

	mem->mm_node = node;
	/* This manager hands out no addresses of its own; mappings are
	 * set up later via the VM.
	 */
	mem->start = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};

/*XXX*/
#include <subdev/mmu/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu_priv *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
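/* Allocate a GART placement by reserving a range of the shared NV04 VM
 * referenced in nv04_gart_manager_init; the backing pages are mapped
 * later, when the TTM backend binds the buffer.
 */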
static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};

int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	/* Offsets below DRM_FILE_PAGE_OFFSET belong to the legacy DRM
	 * map space, not to TTM objects.
	 */
	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}
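/* One-time TTM bring-up for the device: choose a DMA mask that both the
 * GPU MMU and the PCI layer can live with, initialise the TTM BO device,
 * then register the VRAM and GART memory managers sized from what the
 * hardware (or AGP) reports.
 */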
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nv_device_is_pci(nvxx_device(&drm->device))) {
		if (drm->agp.stat == ENABLED ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
					 nv_device_resource_len(nvxx_device(&drm->device), 1));

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
}