drivers/char/agp/hp-agp.c


DEFINITIONS

This source file includes the following definitions.
  1. hp_zx1_ioc_shared
  2. hp_zx1_ioc_owner
  3. hp_zx1_ioc_init
  4. hp_zx1_lba_find_capability
  5. hp_zx1_lba_init
  6. hp_zx1_fetch_size
  7. hp_zx1_configure
  8. hp_zx1_cleanup
  9. hp_zx1_tlbflush
  10. hp_zx1_create_gatt_table
  11. hp_zx1_free_gatt_table
  12. hp_zx1_insert_memory
  13. hp_zx1_remove_memory
  14. hp_zx1_mask_memory
  15. hp_zx1_enable
  16. hp_zx1_setup
  17. zx1_gart_probe
  18. agp_hp_init
  19. agp_hp_cleanup

// SPDX-License-Identifier: GPL-2.0-only
/*
 * HP zx1 AGPGART routines.
 *
 * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
 *      Bjorn Helgaas <bjorn.helgaas@hp.com>
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include <linux/log2.h>
#include <linux/slab.h>

#include <asm/acpi-ext.h>

#include "agp.h"

#define HP_ZX1_IOC_OFFSET       0x1000  /* ACPI reports SBA, we want IOC */

/* HP ZX1 IOC registers */
#define HP_ZX1_IBASE            0x300
#define HP_ZX1_IMASK            0x308
#define HP_ZX1_PCOM             0x310
#define HP_ZX1_TCNFG            0x318
#define HP_ZX1_PDIR_BASE        0x320

#define HP_ZX1_IOVA_BASE        GB(1UL)
#define HP_ZX1_IOVA_SIZE        GB(1UL)
#define HP_ZX1_GART_SIZE        (HP_ZX1_IOVA_SIZE / 2)
#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL

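/*
 * Each I/O PDIR entry holds the physical address of one I/O page, tagged
 * with HP_ZX1_PDIR_VALID_BIT.  HP_ZX1_IOVA_TO_PDIR() converts an I/O
 * virtual address into a PDIR index by subtracting the IOVA base and
 * shifting by the I/O TLB page shift; e.g. with 4KB I/O pages (shift 12),
 * a GART base at iova_base + 512MB lands at index 0x20000.
 */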
#define HP_ZX1_PDIR_VALID_BIT   0x8000000000000000UL
#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)

#define AGP8X_MODE_BIT          3
#define AGP8X_MODE              (1 << AGP8X_MODE_BIT)

/* The AGP bridge need not be a PCI device, but DRM thinks it is. */
static struct pci_dev fake_bridge_dev;

static int hp_zx1_gart_found;

static struct aper_size_info_fixed hp_zx1_sizes[] =
{
        {0, 0, 0},              /* filled in by hp_zx1_fetch_size() */
};

static struct gatt_mask hp_zx1_masks[] =
{
        {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
};

static struct _hp_private {
        volatile u8 __iomem *ioc_regs;
        volatile u8 __iomem *lba_regs;
        int lba_cap_offset;
        u64 *io_pdir;           // PDIR for entire IOVA
        u64 *gatt;              // PDIR just for GART (subset of above)
        u64 gatt_entries;
        u64 iova_base;
        u64 gart_base;
        u64 gart_size;
        u64 io_pdir_size;
        int io_pdir_owner;      // do we own it, or share it with sba_iommu?
        int io_page_size;
        int io_tlb_shift;
        int io_tlb_ps;          // IOC ps config
        int io_pages_per_kpage;
} hp_private;

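/*
 * The IOC is brought up in one of two ways, chosen in hp_zx1_ioc_init():
 * if sba_iommu has already enabled it (IBASE enable bit set), we share its
 * I/O PDIR and use the upper half of the IOVA space that sba_iommu reserves
 * for the GART; otherwise we own the IOC and program IBASE, IMASK, TCNFG
 * and PDIR_BASE ourselves in hp_zx1_configure().
 */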
static int __init hp_zx1_ioc_shared(void)
{
        struct _hp_private *hp = &hp_private;

        printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");

        /*
         * IOC already configured by sba_iommu module; just use
         * its setup.  We assume:
         *      - IOVA space is 1GB in size
         *      - first 512MB is IOMMU, second 512MB is GART
         */
        hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
        switch (hp->io_tlb_ps) {
                case 0: hp->io_tlb_shift = 12; break;
                case 1: hp->io_tlb_shift = 13; break;
                case 2: hp->io_tlb_shift = 14; break;
                case 3: hp->io_tlb_shift = 16; break;
                default:
                        printk(KERN_ERR PFX "Invalid IOTLB page size "
                               "configuration 0x%x\n", hp->io_tlb_ps);
                        hp->gatt = NULL;
                        hp->gatt_entries = 0;
                        return -ENODEV;
        }
        hp->io_page_size = 1 << hp->io_tlb_shift;
        hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

        hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
        hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;

        hp->gart_size = HP_ZX1_GART_SIZE;
        hp->gatt_entries = hp->gart_size / hp->io_page_size;

        hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
        hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];

        if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
                /* Normal case when no AGP device in system */
                hp->gatt = NULL;
                hp->gatt_entries = 0;
                printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
                       "GART disabled\n");
                return -ENODEV;
        }

        return 0;
}

static int __init
hp_zx1_ioc_owner (void)
{
        struct _hp_private *hp = &hp_private;

        printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");

        /*
         * Select an IOV page size no larger than system page size.
         */
        if (PAGE_SIZE >= KB(64)) {
                hp->io_tlb_shift = 16;
                hp->io_tlb_ps = 3;
        } else if (PAGE_SIZE >= KB(16)) {
                hp->io_tlb_shift = 14;
                hp->io_tlb_ps = 2;
        } else if (PAGE_SIZE >= KB(8)) {
                hp->io_tlb_shift = 13;
                hp->io_tlb_ps = 1;
        } else {
                hp->io_tlb_shift = 12;
                hp->io_tlb_ps = 0;
        }
        hp->io_page_size = 1 << hp->io_tlb_shift;
        hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;

        hp->iova_base = HP_ZX1_IOVA_BASE;
        hp->gart_size = HP_ZX1_GART_SIZE;
        hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;

        hp->gatt_entries = hp->gart_size / hp->io_page_size;
        hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);

        return 0;
}

static int __init
hp_zx1_ioc_init (u64 hpa)
{
        struct _hp_private *hp = &hp_private;

        hp->ioc_regs = ioremap(hpa, 1024);
        if (!hp->ioc_regs)
                return -ENOMEM;

        /*
         * If the IOTLB is currently disabled, we can take it over.
         * Otherwise, we have to share with sba_iommu.
         */
        hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;

        if (hp->io_pdir_owner)
                return hp_zx1_ioc_owner();

        return hp_zx1_ioc_shared();
}

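/*
 * Walk the LBA's standard PCI capability list looking for a capability ID.
 * The LBA's config registers are reached through its memory-mapped CSR
 * space (hence readw()/readb() instead of the pci_read_config_*() helpers),
 * and the ttl counter bounds the walk in case the list is malformed.
 */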
static int
hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
{
        u16 status;
        u8 pos, id;
        int ttl = 48;

        status = readw(hpa+PCI_STATUS);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;
        pos = readb(hpa+PCI_CAPABILITY_LIST);
        while (ttl-- && pos >= 0x40) {
                pos &= ~3;
                id = readb(hpa+pos+PCI_CAP_LIST_ID);
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
        }
        return 0;
}

static int __init
hp_zx1_lba_init (u64 hpa)
{
        struct _hp_private *hp = &hp_private;
        int cap;

        hp->lba_regs = ioremap(hpa, 256);
        if (!hp->lba_regs)
                return -ENOMEM;

        hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);

        cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
        if (cap != PCI_CAP_ID_AGP) {
                printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
                       cap, hp->lba_cap_offset);
                iounmap(hp->lba_regs);
                return -ENODEV;
        }

        return 0;
}

static int
hp_zx1_fetch_size(void)
{
        int size;

        size = hp_private.gart_size / MB(1);
        hp_zx1_sizes[0].size = size;
        agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
        return size;
}

static int
hp_zx1_configure (void)
{
        struct _hp_private *hp = &hp_private;

        agp_bridge->gart_bus_addr = hp->gart_base;
        agp_bridge->capndx = hp->lba_cap_offset;
        agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);

        if (hp->io_pdir_owner) {
                /* Each register write is read back to flush it to the IOC. */
                writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
                readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
                writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
                readl(hp->ioc_regs+HP_ZX1_TCNFG);
                writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
                readl(hp->ioc_regs+HP_ZX1_IMASK);
                writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
                readl(hp->ioc_regs+HP_ZX1_IBASE);
                writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
                readl(hp->ioc_regs+HP_ZX1_PCOM);
        }

        return 0;
}

static void
hp_zx1_cleanup (void)
{
        struct _hp_private *hp = &hp_private;

        if (hp->ioc_regs) {
                if (hp->io_pdir_owner) {
                        writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
                        readq(hp->ioc_regs+HP_ZX1_IBASE);
                }
                iounmap(hp->ioc_regs);
        }
        if (hp->lba_regs)
                iounmap(hp->lba_regs);
}

static void
hp_zx1_tlbflush (struct agp_memory *mem)
{
        struct _hp_private *hp = &hp_private;

        /* Purge I/O TLB entries for the GART range: PCOM takes base | log2(size) */
        writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
        readq(hp->ioc_regs+HP_ZX1_PCOM);
}

static int
hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
{
        struct _hp_private *hp = &hp_private;
        int i;

        if (hp->io_pdir_owner) {
                hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
                                                get_order(hp->io_pdir_size));
                if (!hp->io_pdir) {
                        printk(KERN_ERR PFX "Couldn't allocate contiguous "
                                "memory for I/O PDIR\n");
                        hp->gatt = NULL;
                        hp->gatt_entries = 0;
                        return -ENOMEM;
                }
                memset(hp->io_pdir, 0, hp->io_pdir_size);

                hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
        }

        for (i = 0; i < hp->gatt_entries; i++) {
                hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
        }

        return 0;
}

static int
hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
{
        struct _hp_private *hp = &hp_private;

        if (hp->io_pdir_owner)
                free_pages((unsigned long) hp->io_pdir,
                            get_order(hp->io_pdir_size));
        else
                hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
        return 0;
}

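/*
 * The GATT is indexed in I/O pages, which may be smaller than kernel pages;
 * io_pages_per_kpage is that ratio, so each kernel page in 'mem' fills one
 * or more consecutive PDIR entries below.
 */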
static int
hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
{
        struct _hp_private *hp = &hp_private;
        int i, k;
        off_t j, io_pg_start;
        int io_pg_count;

        if (type != mem->type ||
                agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
                return -EINVAL;
        }

        io_pg_start = hp->io_pages_per_kpage * pg_start;
        io_pg_count = hp->io_pages_per_kpage * mem->page_count;
        if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
                return -EINVAL;
        }

        j = io_pg_start;
        while (j < (io_pg_start + io_pg_count)) {
                if (hp->gatt[j]) {
                        return -EBUSY;
                }
                j++;
        }

        if (!mem->is_flushed) {
                global_cache_flush();
                mem->is_flushed = true;
        }

        for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
                unsigned long paddr;

                paddr = page_to_phys(mem->pages[i]);
                for (k = 0;
                     k < hp->io_pages_per_kpage;
                     k++, j++, paddr += hp->io_page_size) {
                        hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
                }
        }

        agp_bridge->driver->tlb_flush(mem);
        return 0;
}

static int
hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
{
        struct _hp_private *hp = &hp_private;
        int i, io_pg_start, io_pg_count;

        if (type != mem->type ||
                agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
                return -EINVAL;
        }

        io_pg_start = hp->io_pages_per_kpage * pg_start;
        io_pg_count = hp->io_pages_per_kpage * mem->page_count;
        for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
                hp->gatt[i] = agp_bridge->scratch_page;
        }

        agp_bridge->driver->tlb_flush(mem);
        return 0;
}

static unsigned long
hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
{
        return HP_ZX1_PDIR_VALID_BIT | addr;
}

static void
hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
{
        struct _hp_private *hp = &hp_private;
        u32 command;

        command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
        command = agp_collect_device_status(bridge, mode, command);
        command |= 0x00000100;  /* AGP enable bit in the AGP command register */

        writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);

        agp_device_command(command, (mode & AGP8X_MODE) != 0);
}

const struct agp_bridge_driver hp_zx1_driver = {
        .owner                  = THIS_MODULE,
        .size_type              = FIXED_APER_SIZE,
        .configure              = hp_zx1_configure,
        .fetch_size             = hp_zx1_fetch_size,
        .cleanup                = hp_zx1_cleanup,
        .tlb_flush              = hp_zx1_tlbflush,
        .mask_memory            = hp_zx1_mask_memory,
        .masks                  = hp_zx1_masks,
        .agp_enable             = hp_zx1_enable,
        .cache_flush            = global_cache_flush,
        .create_gatt_table      = hp_zx1_create_gatt_table,
        .free_gatt_table        = hp_zx1_free_gatt_table,
        .insert_memory          = hp_zx1_insert_memory,
        .remove_memory          = hp_zx1_remove_memory,
        .alloc_by_type          = agp_generic_alloc_by_type,
        .free_by_type           = agp_generic_free_by_type,
        .agp_alloc_page         = agp_generic_alloc_page,
        .agp_alloc_pages        = agp_generic_alloc_pages,
        .agp_destroy_page       = agp_generic_destroy_page,
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = agp_generic_type_to_mask_type,
        .cant_use_aperture      = true,
};

static int __init
hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
{
        struct agp_bridge_data *bridge;
        int error = 0;

        error = hp_zx1_ioc_init(ioc_hpa);
        if (error)
                goto fail;

        error = hp_zx1_lba_init(lba_hpa);
        if (error)
                goto fail;

        bridge = agp_alloc_bridge();
        if (!bridge) {
                error = -ENOMEM;
                goto fail;
        }
        bridge->driver = &hp_zx1_driver;

        fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
        fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
        bridge->dev = &fake_bridge_dev;

        error = agp_add_bridge(bridge);
  fail:
        if (error)
                hp_zx1_cleanup();
        return error;
}

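/*
 * ACPI namespace callback: for each candidate AGP LBA device, find its CSR
 * space, then climb toward the root looking for the enclosing IOC (HWP0001)
 * and hand that IOC's CSR space (offset by HP_ZX1_IOC_OFFSET) plus the LBA
 * CSR space to hp_zx1_setup().
 */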
static acpi_status __init
zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
{
        acpi_handle handle, parent;
        acpi_status status;
        struct acpi_device_info *info;
        u64 lba_hpa, sba_hpa, length;
        int match;

        status = hp_acpi_csr_space(obj, &lba_hpa, &length);
        if (ACPI_FAILURE(status))
                return AE_OK; /* keep looking for another bridge */

        /* Look for an enclosing IOC scope and find its CSR space */
        handle = obj;
        do {
                status = acpi_get_object_info(handle, &info);
                if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
                        /* TBD check _CID also */
                        match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
                        kfree(info);
                        if (match) {
                                status = hp_acpi_csr_space(handle, &sba_hpa, &length);
                                if (ACPI_SUCCESS(status))
                                        break;
                                else {
                                        printk(KERN_ERR PFX "Detected HP ZX1 "
                                               "AGP LBA but no IOC.\n");
                                        return AE_OK;
                                }
                        }
                }

                status = acpi_get_parent(handle, &parent);
                handle = parent;
        } while (ACPI_SUCCESS(status));

        if (ACPI_FAILURE(status))
                return AE_OK;   /* found no enclosing IOC */

        if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
                return AE_OK;

        printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
                "(ioc=%llx, lba=%llx)\n", (char *)context,
                sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);

        hp_zx1_gart_found = 1;
        return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
}

static int __init
agp_hp_init (void)
{
        if (agp_off)
                return -EINVAL;

        acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
        if (hp_zx1_gart_found)
                return 0;

        acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
        if (hp_zx1_gart_found)
                return 0;

        return -ENODEV;
}

static void __exit
agp_hp_cleanup (void)
{
}

module_init(agp_hp_init);
module_exit(agp_hp_cleanup);

MODULE_LICENSE("GPL and additional rights");
