This source file includes the following definitions:
- psb_gtt_mask_pte
- psb_gtt_entry
- psb_gtt_insert
- psb_gtt_remove
- psb_gtt_roll
- psb_gtt_attach_pages
- psb_gtt_detach_pages
- psb_gtt_pin
- psb_gtt_unpin
- psb_gtt_alloc_range
- psb_gtt_free_range
- psb_gtt_alloc
- psb_gtt_takedown
- psb_gtt_init
- psb_gtt_restore

#include <linux/shmem_fs.h>

#include <asm/set_memory.h>

#include "blitter.h"
#include "psb_drv.h"
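
/**
 * psb_gtt_mask_pte - generate a GTT PTE
 * @pfn: page frame number to encode
 * @type: type of memory in the GTT
 *
 * Build the GTT entry for @pfn with the access bits appropriate for the
 * given memory type.
 */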
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	/* Ensure we explode rather than put an invalid low mapping of
	   a high mapping page into the gtt */
	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
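
/**
 * psb_gtt_entry - find the GTT entries for a gtt_range
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Given a gtt_range object, return the address of the first GTT page table
 * entry covering it.
 */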
static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}
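
/**
 * psb_gtt_insert - put an object into the GTT
 * @dev: our DRM device
 * @r: our GTT range
 * @resume: on resume
 *
 * Take our preallocated GTT range and insert the GEM object into the GTT.
 * This is protected via the gtt mutex which the caller must hold.
 */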
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
			  int resume)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	if (!resume) {
		/* Make sure changes are visible to the GPU */
		set_pages_array_wc(pages, r->npage);
	}

	/* Write our page entries into the GTT itself */
	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	return 0;
}
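
/**
 * psb_gtt_remove - remove an object from the GTT
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Remove a preallocated GTT range from the GTT, overwriting all of its page
 * table entries with the scratch page. This is protected via the gtt mutex
 * which the caller must hold.
 */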
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
			       PSB_MMU_CACHED_MEMORY);

	for (i = 0; i < r->npage; i++)
		iowrite32(pte, gtt_slot++);
	ioread32(gtt_slot - 1);
	set_pages_array_wb(r->pages, r->npage);
}
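
/**
 * psb_gtt_roll - set scrolling position
 * @dev: our DRM device
 * @r: the gtt mapping we are using
 * @roll: roll offset
 *
 * Roll an existing pinned mapping by moving the pages through the GTT.
 * This allows us to implement hardware scrolling on the consoles without
 * a 2D engine.
 */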
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	if (roll >= r->npage) {
		WARN_ON(1);
		return;
	}

	r->roll = roll;

	/* Not currently in the GTT - no worry, we will write the mapping at
	   the right position when it gets pinned */
	if (!r->stolen && !r->in_gart)
		return;

	gtt_slot = psb_gtt_entry(dev, r);

	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	ioread32(gtt_slot - 1);
}
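
/**
 * psb_gtt_attach_pages - attach and pin GEM pages
 * @gt: the gtt range
 *
 * Pin and build an in-kernel list of the pages that back our GEM object.
 * While we hold this the pages cannot be swapped out. This is protected via
 * the gtt mutex which the caller must hold.
 */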
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct page **pages;

	WARN_ON(gt->pages);

	pages = drm_gem_get_pages(&gt->gem);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	gt->npage = gt->gem.size / PAGE_SIZE;
	gt->pages = pages;

	return 0;
}
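
/**
 * psb_gtt_detach_pages - unpin and release GEM pages
 * @gt: the gtt range
 *
 * Undo the effect of psb_gtt_attach_pages. At this point the pages must have
 * been removed from the GTT as they could now be paged out and change bus
 * address. This is protected via the gtt mutex which the caller must hold.
 */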
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
	gt->pages = NULL;
}
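
/**
 * psb_gtt_pin - pin pages into the GTT
 * @gt: range to pin
 *
 * Pin a set of pages into the GTT. The pins are refcounted so that multiple
 * pins need multiple unpins to undo.
 *
 * Stolen (non GEM backed) objects treat this as a no-op as they are always
 * resident in the GTT.
 */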
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;

	mutex_lock(&dev_priv->gtt_mutex);

	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt, 0);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
		psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     gt->pages, (gpu_base + gt->offset),
				     gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}
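
/**
 * psb_gtt_unpin - drop a GTT pin requirement
 * @gt: range to unpin
 *
 * Undoes the effect of psb_gtt_pin. On the last drop the GEM object is
 * removed from the GTT, which also drops the page references and allows the
 * VM to reclaim or page out the memory.
 *
 * Stolen (non GEM backed) objects treat this as a no-op as they are always
 * resident in the GTT.
 */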
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;
	int ret;

	/* While holding the gtt_mutex no new blits can be initiated */
	mutex_lock(&dev_priv->gtt_mutex);

	/* Wait for any possible usage of the memory to be finished */
	ret = gma_blt_wait_idle(dev_priv);
	if (ret) {
		DRM_ERROR("Failed to idle the blitter, unpin failed!");
		goto out;
	}

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     (gpu_base + gt->offset), gt->npage, 0, 0);
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}

out:
	mutex_unlock(&dev_priv->gtt_mutex);
}
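
/**
 * psb_gtt_alloc_range - allocate GTT address space
 * @dev: our DRM device
 * @len: length (bytes) of address space required
 * @name: resource name
 * @backed: resource should be backed by stolen pages
 * @align: requested alignment
 *
 * Ask the kernel core to find us a suitable range of addresses to use for a
 * GTT mapping.
 *
 * Returns a gtt_range structure describing the object, or NULL on error.
 * On successful return the resource is both allocated and marked as in use.
 */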
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
				      const char *name, int backed, u32 align)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->gtt.stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->gtt.stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	gt->roll = 0;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, align, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}
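
/**
 * psb_gtt_free_range - release GTT address space
 * @dev: our DRM device
 * @gt: a mapping created with psb_gtt_alloc_range
 *
 * Release a resource that was allocated with psb_gtt_alloc_range. If the
 * object has been pinned by mmap users we clean that up here as well.
 */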
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}
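
/* Set up the bookkeeping for our GTT: currently just the semaphore that
   protects it. */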
static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}
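
/*
 * psb_gtt_takedown - unwind psb_gtt_init
 * @dev: our DRM device
 *
 * Unmap the GTT and stolen memory mappings and restore the original GMCH
 * control and page table control register values.
 */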
void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	if (dev_priv->vram_addr)
		iounmap(dev_priv->vram_addr);
}
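
/*
 * psb_gtt_init - set up the GTT
 * @dev: our DRM device
 * @resume: non-zero when re-initialising after a resume
 *
 * Enable the GTT, size the stolen memory area, map the GTT and the stolen
 * memory, then point the stolen-memory GTT entries at the stolen pages and
 * the remaining entries at the scratch page.
 */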
int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	if (!resume) {
		mutex_init(&dev_priv->gtt_mutex);
		mutex_init(&dev_priv->mmap_mutex);
		psb_gtt_alloc(dev);
	}

	pg = &dev_priv->gtt;

	/* Enable the GTT */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* Remember that the GTT is live so that takedown can restore the
	   original register state */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 * The video mmu has a hw bug when accessing 0x0D0000000. Make the
	 * GATT start at 0x0E0000000. This doesn't actually matter for us
	 * now, but may do if the video acceleration ever gets opened up.
	 */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;
	/* CDV doesn't report this. In which case the system has 64 gtt pages */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;
		/* This can occur on CDV systems. Fudge it in this case. We
		   really don't care what imaginary space is being allocated
		   at this point. */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
		/* The fudge resource is only used as an address range to
		   allocate GTT offsets from; it is not a real CPU-visible
		   aperture. */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
			dev_priv->stolen_base, vram_stolen_size / 1024);

	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/*
	 * Map the GTT and the stolen memory area
	 */
	if (!resume)
		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	if (!resume)
		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
						 stolen_size);

	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Insert vram stolen pages into the GTT
	 */
	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	num_pages = vram_stolen_size >> PAGE_SHIFT;
	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init the rest of the GTT to the scratch page to avoid accidents
	 * or scribbles
	 */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}
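
/*
 * psb_gtt_restore - restore GTT mappings on resume
 * @dev: our DRM device
 *
 * Re-run the GTT init in resume mode and re-insert every allocated range
 * that still has pages attached.
 */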
int psb_gtt_restore(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct resource *r = dev_priv->gtt_mem->child;
	struct gtt_range *range;
	unsigned int restored = 0, total = 0, size = 0;

	/* On resume, the gtt_mutex is already initialized */
	mutex_lock(&dev_priv->gtt_mutex);
	psb_gtt_init(dev, 1);

	while (r != NULL) {
		range = container_of(r, struct gtt_range, resource);
		if (range->pages) {
			psb_gtt_insert(dev, range, 1);
			size += range->resource.end - range->resource.start;
			restored++;
		}
		r = r->sibling;
		total++;
	}
	mutex_unlock(&dev_priv->gtt_mutex);
	DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
			 total, (size / 1024));

	return 0;
}