root/arch/alpha/kernel/core_titan.c


DEFINITIONS

This source file includes the following definitions:
  1. mk_tig_addr
  2. titan_read_tig
  3. titan_write_tig
  4. mk_conf_addr
  5. titan_read_config
  6. titan_write_config
  7. titan_pci_tbi
  8. titan_query_agp
  9. titan_init_one_pachip_port
  10. titan_init_pachips
  11. titan_init_arch
  12. titan_kill_one_pachip_port
  13. titan_kill_pachips
  14. titan_kill_arch
  15. titan_ioportmap
  16. titan_ioremap
  17. titan_iounmap
  18. titan_is_mmio
  19. titan_agp_setup
  20. titan_agp_cleanup
  21. titan_agp_configure
  22. titan_agp_bind_memory
  23. titan_agp_unbind_memory
  24. titan_agp_translate
  25. titan_agp_info

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  *      linux/arch/alpha/kernel/core_titan.c
   4  *
   5  * Code common to all TITAN core logic chips.
   6  */
   7 
   8 #define __EXTERN_INLINE inline
   9 #include <asm/io.h>
  10 #include <asm/core_titan.h>
  11 #undef __EXTERN_INLINE
  12 
  13 #include <linux/module.h>
  14 #include <linux/types.h>
  15 #include <linux/pci.h>
  16 #include <linux/sched.h>
  17 #include <linux/init.h>
  18 #include <linux/vmalloc.h>
  19 #include <linux/memblock.h>
  20 
  21 #include <asm/ptrace.h>
  22 #include <asm/smp.h>
  23 #include <asm/pgalloc.h>
  24 #include <asm/tlbflush.h>
  25 #include <asm/vga.h>
  26 
  27 #include "proto.h"
  28 #include "pci_impl.h"
  29 
  30 /* Save Titan configuration data as the console had it set up.  */
  31 
  32 struct
  33 {
  34         unsigned long wsba[4];
  35         unsigned long wsm[4];
  36         unsigned long tba[4];
  37 } saved_config[4] __attribute__((common));
  38 
  39 /*
  40  * Is PChip 1 present? No need to query it more than once.
  41  */
  42 static int titan_pchip1_present;
  43 
  44 /*
  45  * BIOS32-style PCI interface:
  46  */
  47 
  48 #define DEBUG_CONFIG 0
  49 
  50 #if DEBUG_CONFIG
  51 # define DBG_CFG(args)  printk args
  52 #else
  53 # define DBG_CFG(args)
  54 #endif
  55 
  56 
  57 /*
  58  * Routines to access TIG registers.
  59  */
  60 static inline volatile unsigned long *
  61 mk_tig_addr(int offset)
  62 {
  63         return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
  64 }
  65 
  66 static inline u8 
  67 titan_read_tig(int offset, u8 value)
  68 {
  69         volatile unsigned long *tig_addr = mk_tig_addr(offset);
  70         return (u8)(*tig_addr & 0xff);
  71 }
  72 
  73 static inline void 
  74 titan_write_tig(int offset, u8 value)
  75 {
  76         volatile unsigned long *tig_addr = mk_tig_addr(offset);
  77         *tig_addr = (unsigned long)value;
  78 }
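      /*
       * Illustrative note: mk_tig_addr() spaces TIG registers 64 bytes
       * apart, so offset 2, for example, maps to TITAN_TIG_SPACE + (2 << 6)
       * = TITAN_TIG_SPACE + 0x80.  titan_read_tig() returns only the low
       * byte of the 64-bit load; its 'value' argument is unused and appears
       * to exist only for symmetry with titan_write_tig().
       */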
  79 
  80 
  81 /*
   82  * Given a bus, device, and function number, compute the resulting
   83  * configuration space address.
   84  * It is not safe to have concurrent invocations of the configuration
   85  * space access routines, but there really shouldn't be any need
   86  * for this.
  87  *
  88  * Note that all config space accesses use Type 1 address format.
  89  *
  90  * Note also that type 1 is determined by non-zero bus number.
  91  *
  92  * Type 1:
  93  *
  94  *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1 
  95  *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
  96  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  97  * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
  98  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  99  *
 100  *      31:24   reserved
  101  *      23:16   bus number (8 bits = 256 possible buses)
  102  *      15:11   device number (5 bits)
  103  *      10:8    function number (3 bits)
  104  *       7:2    register number (6 bits)
 105  *  
 106  * Notes:
 107  *      The function number selects which function of a multi-function device 
 108  *      (e.g., SCSI and Ethernet).
 109  * 
  110  *      The register field selects a DWORD (32 bit) register.  It is not
  111  *      shifted right by 2 bits; leaving it at bits 7:2 simply "drops" the
  112  *      bottom two bits.
 113  */
 114 
 115 static int
 116 mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
 117              unsigned long *pci_addr, unsigned char *type1)
 118 {
 119         struct pci_controller *hose = pbus->sysdata;
 120         unsigned long addr;
 121         u8 bus = pbus->number;
 122 
  123         DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x, "
 124                  "pci_addr=0x%p, type1=0x%p)\n",
 125                  bus, device_fn, where, pci_addr, type1));
 126 
 127         if (!pbus->parent) /* No parent means peer PCI bus. */
 128                 bus = 0;
 129         *type1 = (bus != 0);
 130 
 131         addr = (bus << 16) | (device_fn << 8) | where;
 132         addr |= hose->config_space_base;
 133                 
 134         *pci_addr = addr;
 135         DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
 136         return 0;
 137 }
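      /*
       * Worked example (hypothetical values): a config access on bus 1,
       * device 2, function 0 at register 0x10 has device_fn =
       * PCI_DEVFN(2, 0) = 0x10, so addr = (1 << 16) | (0x10 << 8) | 0x10
       * = 0x11010, offset by the hose's config_space_base.
       */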
 138 
 139 static int
 140 titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
 141                   int size, u32 *value)
 142 {
 143         unsigned long addr;
 144         unsigned char type1;
 145 
 146         if (mk_conf_addr(bus, devfn, where, &addr, &type1))
 147                 return PCIBIOS_DEVICE_NOT_FOUND;
 148 
 149         switch (size) {
 150         case 1:
 151                 *value = __kernel_ldbu(*(vucp)addr);
 152                 break;
 153         case 2:
 154                 *value = __kernel_ldwu(*(vusp)addr);
 155                 break;
 156         case 4:
 157                 *value = *(vuip)addr;
 158                 break;
 159         }
 160 
 161         return PCIBIOS_SUCCESSFUL;
 162 }
 163 
 164 static int 
 165 titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
 166                    int size, u32 value)
 167 {
 168         unsigned long addr;
 169         unsigned char type1;
 170 
 171         if (mk_conf_addr(bus, devfn, where, &addr, &type1))
 172                 return PCIBIOS_DEVICE_NOT_FOUND;
 173 
 174         switch (size) {
 175         case 1:
 176                 __kernel_stb(value, *(vucp)addr);
 177                 mb();
 178                 __kernel_ldbu(*(vucp)addr);
 179                 break;
 180         case 2:
 181                 __kernel_stw(value, *(vusp)addr);
 182                 mb();
 183                 __kernel_ldwu(*(vusp)addr);
 184                 break;
 185         case 4:
 186                 *(vuip)addr = value;
 187                 mb();
 188                 *(vuip)addr;
 189                 break;
 190         }
 191 
 192         return PCIBIOS_SUCCESSFUL;
 193 }
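      /*
       * The mb() and dummy read-back after each store above force the
       * posted configuration write out to the PChip before returning.
       * Generic PCI code reaches these routines through the titan_pci_ops
       * table below, e.g. (illustrative only):
       *
       *      u32 id;
       *      pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &id);
       */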
 194 
 195 struct pci_ops titan_pci_ops = 
 196 {
 197         .read =         titan_read_config,
 198         .write =        titan_write_config,
 199 };
 200 
 201 
 202 void
 203 titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
 204 {
 205         titan_pachip *pachip = 
 206           (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
 207         titan_pachip_port *port;
 208         volatile unsigned long *csr;
 209         unsigned long value;
 210 
 211         /* Get the right hose.  */
 212         port = &pachip->g_port;
 213         if (hose->index & 2) 
 214                 port = &pachip->a_port;
 215 
 216         /* We can invalidate up to 8 tlb entries in a go.  The flush
 217            matches against <31:16> in the pci address.  
 218            Note that gtlbi* and atlbi* are in the same place in the g_port
 219            and a_port, respectively, so the g_port offset can be used
 220            even if hose is an a_port */
 221         csr = &port->port_specific.g.gtlbia.csr;
 222         if (((start ^ end) & 0xffff0000) == 0)
 223                 csr = &port->port_specific.g.gtlbiv.csr;
 224 
 225         /* For TBIA, it doesn't matter what value we write.  For TBI, 
 226            it's the shifted tag bits.  */
 227         value = (start & 0xffff0000) >> 12;
 228 
 229         wmb();
 230         *csr = value;
 231         mb();
 232         *csr;
 233 }
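      /*
       * Worked example (illustrative): a flush whose start and end lie in
       * the same 64KB-aligned block, say a single page at bus address
       * 0x12340000, uses the gtlbiv (invalidate-by-value) CSR and writes
       * (0x12340000 & 0xffff0000) >> 12 = 0x12340.  A range spanning
       * blocks, such as titan_pci_tbi(hose, 0, -1) at init time, keeps the
       * gtlbia CSR and flushes everything.
       */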
 234 
 235 static int
 236 titan_query_agp(titan_pachip_port *port)
 237 {
 238         union TPAchipPCTL pctl;
 239 
 240         /* set up APCTL */
 241         pctl.pctl_q_whole = port->pctl.csr;
 242 
 243         return pctl.pctl_r_bits.apctl_v_agp_present;
 244 
 245 }
 246 
 247 static void __init
 248 titan_init_one_pachip_port(titan_pachip_port *port, int index)
 249 {
 250         struct pci_controller *hose;
 251 
 252         hose = alloc_pci_controller();
 253         if (index == 0)
 254                 pci_isa_hose = hose;
 255         hose->io_space = alloc_resource();
 256         hose->mem_space = alloc_resource();
 257 
 258         /*
 259          * This is for userland consumption.  The 40-bit PIO bias that we 
 260          * use in the kernel through KSEG doesn't work in the page table 
 261          * based user mappings. (43-bit KSEG sign extends the physical
 262          * address from bit 40 to hit the I/O bit - mapped addresses don't).
 263          * So make sure we get the 43-bit PIO bias.  
 264          */
 265         hose->sparse_mem_base = 0;
 266         hose->sparse_io_base = 0;
 267         hose->dense_mem_base
 268           = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
 269         hose->dense_io_base
 270           = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;
 271 
 272         hose->config_space_base = TITAN_CONF(index);
 273         hose->index = index;
 274 
 275         hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
 276         hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
 277         hose->io_space->name = pci_io_names[index];
 278         hose->io_space->flags = IORESOURCE_IO;
 279 
 280         hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
 281         hose->mem_space->end = hose->mem_space->start + 0xffffffff;
 282         hose->mem_space->name = pci_mem_names[index];
 283         hose->mem_space->flags = IORESOURCE_MEM;
 284 
 285         if (request_resource(&ioport_resource, hose->io_space) < 0)
 286                 printk(KERN_ERR "Failed to request IO on hose %d\n", index);
 287         if (request_resource(&iomem_resource, hose->mem_space) < 0)
 288                 printk(KERN_ERR "Failed to request MEM on hose %d\n", index);
 289 
 290         /*
 291          * Save the existing PCI window translations.  SRM will 
 292          * need them when we go to reboot.
 293          */
 294         saved_config[index].wsba[0] = port->wsba[0].csr;
 295         saved_config[index].wsm[0]  = port->wsm[0].csr;
 296         saved_config[index].tba[0]  = port->tba[0].csr;
 297 
 298         saved_config[index].wsba[1] = port->wsba[1].csr;
 299         saved_config[index].wsm[1]  = port->wsm[1].csr;
 300         saved_config[index].tba[1]  = port->tba[1].csr;
 301 
 302         saved_config[index].wsba[2] = port->wsba[2].csr;
 303         saved_config[index].wsm[2]  = port->wsm[2].csr;
 304         saved_config[index].tba[2]  = port->tba[2].csr;
 305 
 306         saved_config[index].wsba[3] = port->wsba[3].csr;
 307         saved_config[index].wsm[3]  = port->wsm[3].csr;
 308         saved_config[index].tba[3]  = port->tba[3].csr;
 309 
 310         /*
 311          * Set up the PCI to main memory translation windows.
 312          *
 313          * Note: Window 3 on Titan is Scatter-Gather ONLY.
 314          *
 315          * Window 0 is scatter-gather 8MB at 8MB (for isa)
 316          * Window 1 is direct access 1GB at 2GB
 317          * Window 2 is scatter-gather 1GB at 3GB
 318          */
 319         hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000,
 320                                        SMP_CACHE_BYTES);
 321         hose->sg_isa->align_entry = 8; /* 64KB for ISA */
 322 
 323         hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000,
 324                                        SMP_CACHE_BYTES);
 325         hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */
 326 
 327         port->wsba[0].csr = hose->sg_isa->dma_base | 3;
 328         port->wsm[0].csr  = (hose->sg_isa->size - 1) & 0xfff00000;
 329         port->tba[0].csr  = virt_to_phys(hose->sg_isa->ptes);
 330 
 331         port->wsba[1].csr = __direct_map_base | 1;
 332         port->wsm[1].csr  = (__direct_map_size - 1) & 0xfff00000;
 333         port->tba[1].csr  = 0;
 334 
 335         port->wsba[2].csr = hose->sg_pci->dma_base | 3;
 336         port->wsm[2].csr  = (hose->sg_pci->size - 1) & 0xfff00000;
 337         port->tba[2].csr  = virt_to_phys(hose->sg_pci->ptes);
 338 
 339         port->wsba[3].csr = 0;
 340 
 341         /* Enable the Monster Window to make DAC pci64 possible.  */
 342         port->pctl.csr |= pctl_m_mwin;
 343 
 344         /*
 345          * If it's an AGP port, initialize agplastwr.
 346          */
 347         if (titan_query_agp(port)) 
 348                 port->port_specific.a.agplastwr.csr = __direct_map_base;
 349 
 350         titan_pci_tbi(hose, 0, -1);
 351 }
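      /*
       * A rough summary of the window encoding programmed above: wsba
       * holds the window base ORed with an enable bit (bit 0) and a
       * scatter-gather bit (bit 1), wsm holds (size - 1) masked to 1MB
       * granularity, and tba holds the physical address of the PTE table
       * for scatter-gather windows or 0 for the direct window.  For the
       * direct window this works out to wsba[1] = 0x80000001,
       * wsm[1] = 0x3ff00000, tba[1] = 0.
       */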
 352 
 353 static void __init
 354 titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
 355 {
 356         titan_pchip1_present = TITAN_cchip->csc.csr & 1L<<14;
 357 
 358         /* Init the ports in hose order... */
 359         titan_init_one_pachip_port(&pachip0->g_port, 0);        /* hose 0 */
 360         if (titan_pchip1_present)
 361                 titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
 362         titan_init_one_pachip_port(&pachip0->a_port, 2);        /* hose 2 */
 363         if (titan_pchip1_present)
 364                 titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
 365 }
 366 
 367 void __init
 368 titan_init_arch(void)
 369 {
 370 #if 0
 371         printk("%s: titan_init_arch()\n", __func__);
 372         printk("%s: CChip registers:\n", __func__);
 373         printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr);
 374         printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr);
 375         printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr);
 376         printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr);
 377         printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr);
 378         printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr);
 379         printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr);
 380         printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr);
 381 
 382         printk("%s: DChip registers:\n", __func__);
 383         printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr);
 384         printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr);
 385         printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr);
 386 #endif
 387 
 388         boot_cpuid = __hard_smp_processor_id();
 389 
 390         /* With multiple PCI busses, we play with I/O as physical addrs.  */
 391         ioport_resource.end = ~0UL;
 392         iomem_resource.end = ~0UL;
 393 
 394         /* PCI DMA Direct Mapping is 1GB at 2GB.  */
 395         __direct_map_base = 0x80000000;
 396         __direct_map_size = 0x40000000;
 397 
 398         /* Init the PA chip(s).  */
 399         titan_init_pachips(TITAN_pachip0, TITAN_pachip1);
 400 
 401         /* Check for graphic console location (if any).  */
 402         find_console_vga_hose();
 403 }
 404 
 405 static void
 406 titan_kill_one_pachip_port(titan_pachip_port *port, int index)
 407 {
 408         port->wsba[0].csr = saved_config[index].wsba[0];
 409         port->wsm[0].csr  = saved_config[index].wsm[0];
 410         port->tba[0].csr  = saved_config[index].tba[0];
 411 
 412         port->wsba[1].csr = saved_config[index].wsba[1];
 413         port->wsm[1].csr  = saved_config[index].wsm[1];
 414         port->tba[1].csr  = saved_config[index].tba[1];
 415 
 416         port->wsba[2].csr = saved_config[index].wsba[2];
 417         port->wsm[2].csr  = saved_config[index].wsm[2];
 418         port->tba[2].csr  = saved_config[index].tba[2];
 419 
 420         port->wsba[3].csr = saved_config[index].wsba[3];
 421         port->wsm[3].csr  = saved_config[index].wsm[3];
 422         port->tba[3].csr  = saved_config[index].tba[3];
 423 }
 424 
 425 static void
 426 titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
 427 {
 428         if (titan_pchip1_present) {
 429                 titan_kill_one_pachip_port(&pachip1->g_port, 1);
 430                 titan_kill_one_pachip_port(&pachip1->a_port, 3);
 431         }
 432         titan_kill_one_pachip_port(&pachip0->g_port, 0);
 433         titan_kill_one_pachip_port(&pachip0->a_port, 2);
 434 }
 435 
 436 void
 437 titan_kill_arch(int mode)
 438 {
 439         titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
 440 }
 441 
 442 
 443 /*
 444  * IO map support.
 445  */
 446 
 447 void __iomem *
 448 titan_ioportmap(unsigned long addr)
 449 {
 450         FIXUP_IOADDR_VGA(addr);
 451         return (void __iomem *)(addr + TITAN_IO_BIAS);
 452 }
 453 
 454 
 455 void __iomem *
 456 titan_ioremap(unsigned long addr, unsigned long size)
 457 {
 458         int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
 459         unsigned long baddr = addr & ~TITAN_HOSE_MASK;
 460         unsigned long last = baddr + size - 1;
 461         struct pci_controller *hose;    
 462         struct vm_struct *area;
 463         unsigned long vaddr;
 464         unsigned long *ptes;
 465         unsigned long pfn;
 466 
 467 #ifdef CONFIG_VGA_HOSE
 468         /*
 469          * Adjust the address and hose, if necessary.
 470          */ 
 471         if (pci_vga_hose && __is_mem_vga(addr)) {
 472                 h = pci_vga_hose->index;
 473                 addr += pci_vga_hose->mem_space->start;
 474         }
 475 #endif
 476 
 477         /*
 478          * Find the hose.
 479          */
 480         for (hose = hose_head; hose; hose = hose->next)
 481                 if (hose->index == h)
 482                         break;
 483         if (!hose)
 484                 return NULL;
 485 
 486         /*
 487          * Is it direct-mapped?
 488          */
 489         if ((baddr >= __direct_map_base) && 
 490             ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
 491                 vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
 492                 return (void __iomem *) vaddr;
 493         }
 494 
 495         /* 
 496          * Check the scatter-gather arena.
 497          */
 498         if (hose->sg_pci &&
 499             baddr >= (unsigned long)hose->sg_pci->dma_base &&
 500             last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){
 501 
 502                 /*
 503                  * Adjust the limits (mappings must be page aligned)
 504                  */
 505                 baddr -= hose->sg_pci->dma_base;
 506                 last -= hose->sg_pci->dma_base;
 507                 baddr &= PAGE_MASK;
 508                 size = PAGE_ALIGN(last) - baddr;
 509 
 510                 /*
 511                  * Map it
 512                  */
 513                 area = get_vm_area(size, VM_IOREMAP);
 514                 if (!area) {
 515                         printk("ioremap failed... no vm_area...\n");
 516                         return NULL;
 517                 }
 518 
 519                 ptes = hose->sg_pci->ptes;
 520                 for (vaddr = (unsigned long)area->addr; 
 521                     baddr <= last; 
 522                     baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
 523                         pfn = ptes[baddr >> PAGE_SHIFT];
 524                         if (!(pfn & 1)) {
 525                                 printk("ioremap failed... pte not valid...\n");
 526                                 vfree(area->addr);
 527                                 return NULL;
 528                         }
 529                         pfn >>= 1;      /* make it a true pfn */
 530                         
 531                         if (__alpha_remap_area_pages(vaddr,
 532                                                      pfn << PAGE_SHIFT, 
 533                                                      PAGE_SIZE, 0)) {
 534                                 printk("FAILED to remap_area_pages...\n");
 535                                 vfree(area->addr);
 536                                 return NULL;
 537                         }
 538                 }
 539 
 540                 flush_tlb_all();
 541 
 542                 vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
 543                 return (void __iomem *) vaddr;
 544         }
 545 
 546         /* Assume a legacy (read: VGA) address, and return appropriately. */
 547         return (void __iomem *)(addr + TITAN_MEM_BIAS);
 548 }
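      /*
       * A note on the scatter-gather path above: each IOMMU PTE appears to
       * hold a valid bit in bit 0 with the page frame number shifted left
       * by one, so a PTE of (pfn << 1) | 1 maps the physical page at
       * pfn << PAGE_SHIFT.  titan_agp_translate() below decodes the same
       * format.
       */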
 549 
 550 void
 551 titan_iounmap(volatile void __iomem *xaddr)
 552 {
 553         unsigned long addr = (unsigned long) xaddr;
 554         if (addr >= VMALLOC_START)
 555                 vfree((void *)(PAGE_MASK & addr)); 
 556 }
 557 
 558 int
 559 titan_is_mmio(const volatile void __iomem *xaddr)
 560 {
 561         unsigned long addr = (unsigned long) xaddr;
 562 
 563         if (addr >= VMALLOC_START)
 564                 return 1;
 565         else
 566                 return (addr & 0x100000000UL) == 0;
 567 }
 568 
 569 #ifndef CONFIG_ALPHA_GENERIC
 570 EXPORT_SYMBOL(titan_ioportmap);
 571 EXPORT_SYMBOL(titan_ioremap);
 572 EXPORT_SYMBOL(titan_iounmap);
 573 EXPORT_SYMBOL(titan_is_mmio);
 574 #endif
 575 
 576 /*
 577  * AGP GART Support.
 578  */
 579 #include <linux/agp_backend.h>
 580 #include <asm/agp_backend.h>
 581 #include <linux/slab.h>
 582 #include <linux/delay.h>
 583 
 584 struct titan_agp_aperture {
 585         struct pci_iommu_arena *arena;
 586         long pg_start;
 587         long pg_count;
 588 };
 589 
 590 static int
 591 titan_agp_setup(alpha_agp_info *agp)
 592 {
 593         struct titan_agp_aperture *aper;
 594 
 595         if (!alpha_agpgart_size)
 596                 return -ENOMEM;
 597 
 598         aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
 599         if (aper == NULL)
 600                 return -ENOMEM;
 601 
 602         aper->arena = agp->hose->sg_pci;
 603         aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
 604         aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
 605                                        aper->pg_count - 1);
 606         if (aper->pg_start < 0) {
 607                 printk(KERN_ERR "Failed to reserve AGP memory\n");
 608                 kfree(aper);
 609                 return -ENOMEM;
 610         }
 611 
 612         agp->aperture.bus_base = 
 613                 aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
 614         agp->aperture.size = aper->pg_count * PAGE_SIZE;
 615         agp->aperture.sysdata = aper;
 616 
 617         return 0;
 618 }
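      /*
       * Passing pg_count - 1 as the alignment mask to iommu_reserve()
       * appears to align the aperture to its own size within the
       * scatter-gather arena; e.g. a hypothetical 64MB alpha_agpgart_size
       * with 8KB pages reserves 8192 PTEs on a 64MB-aligned boundary.
       */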
 619 
 620 static void
 621 titan_agp_cleanup(alpha_agp_info *agp)
 622 {
 623         struct titan_agp_aperture *aper = agp->aperture.sysdata;
 624         int status;
 625 
 626         status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
 627         if (status == -EBUSY) {
 628                 printk(KERN_WARNING 
 629                        "Attempted to release bound AGP memory - unbinding\n");
 630                 iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
 631                 status = iommu_release(aper->arena, aper->pg_start, 
 632                                        aper->pg_count);
 633         }
 634         if (status < 0)
 635                 printk(KERN_ERR "Failed to release AGP memory\n");
 636 
 637         kfree(aper);
 638         kfree(agp);
 639 }
 640 
 641 static int
 642 titan_agp_configure(alpha_agp_info *agp)
 643 {
 644         union TPAchipPCTL pctl;
 645         titan_pachip_port *port = agp->private;
 646         pctl.pctl_q_whole = port->pctl.csr;
 647 
 648         /* Side-Band Addressing? */
 649         pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;
 650 
 651         /* AGP Rate? */
 652         pctl.pctl_r_bits.apctl_v_agp_rate = 0;          /* 1x */
 653         if (agp->mode.bits.rate & 2) 
 654                 pctl.pctl_r_bits.apctl_v_agp_rate = 1;  /* 2x */
 655 #if 0
 656         if (agp->mode.bits.rate & 4) 
 657                 pctl.pctl_r_bits.apctl_v_agp_rate = 2;  /* 4x */
 658 #endif
 659         
 660         /* RQ Depth? */
 661         pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
 662         pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;
 663 
 664         /*
 665          * AGP Enable.
 666          */
 667         pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;
 668 
 669         /* Tell the user.  */
 670         printk("Enabling AGP: %dX%s\n", 
 671                1 << pctl.pctl_r_bits.apctl_v_agp_rate,
 672                pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");
 673                
 674         /* Write it.  */
 675         port->pctl.csr = pctl.pctl_q_whole;
 676         
 677         /* And wait at least 5000 66MHz cycles (per Titan spec).  */
 678         udelay(100);
 679 
 680         return 0;
 681 }
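      /*
       * On the 5000-cycle wait above: 5000 cycles at 66MHz is about 76us
       * (5000 / 66e6 ~= 75.8us), so udelay(100) covers the requirement
       * with some margin.
       */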
 682 
 683 static int 
 684 titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
 685 {
 686         struct titan_agp_aperture *aper = agp->aperture.sysdata;
 687         return iommu_bind(aper->arena, aper->pg_start + pg_start, 
 688                           mem->page_count, mem->pages);
 689 }
 690 
 691 static int 
 692 titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
 693 {
 694         struct titan_agp_aperture *aper = agp->aperture.sysdata;
 695         return iommu_unbind(aper->arena, aper->pg_start + pg_start,
 696                             mem->page_count);
 697 }
 698 
 699 static unsigned long
 700 titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
 701 {
 702         struct titan_agp_aperture *aper = agp->aperture.sysdata;
 703         unsigned long baddr = addr - aper->arena->dma_base;
 704         unsigned long pte;
 705 
 706         if (addr < agp->aperture.bus_base ||
 707             addr >= agp->aperture.bus_base + agp->aperture.size) {
 708                 printk("%s: addr out of range\n", __func__);
 709                 return -EINVAL;
 710         }
 711 
 712         pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
 713         if (!(pte & 1)) {
 714                 printk("%s: pte not valid\n", __func__);
 715                 return -EINVAL;
 716         }
 717 
 718         return (pte >> 1) << PAGE_SHIFT;
 719 }
 720 
 721 struct alpha_agp_ops titan_agp_ops =
 722 {
 723         .setup          = titan_agp_setup,
 724         .cleanup        = titan_agp_cleanup,
 725         .configure      = titan_agp_configure,
 726         .bind           = titan_agp_bind_memory,
 727         .unbind         = titan_agp_unbind_memory,
 728         .translate      = titan_agp_translate
 729 };
 730 
 731 alpha_agp_info *
 732 titan_agp_info(void)
 733 {
 734         alpha_agp_info *agp;
 735         struct pci_controller *hose;
 736         titan_pachip_port *port;
 737         int hosenum = -1;
 738         union TPAchipPCTL pctl;
 739 
 740         /*
 741          * Find the AGP port.
 742          */
 743         port = &TITAN_pachip0->a_port;
 744         if (titan_query_agp(port))
 745                 hosenum = 2;
 746         if (hosenum < 0 && 
 747             titan_pchip1_present &&
 748             titan_query_agp(port = &TITAN_pachip1->a_port)) 
 749                 hosenum = 3;
 750         
 751         /*
 752          * Find the hose the port is on.
 753          */
 754         for (hose = hose_head; hose; hose = hose->next)
 755                 if (hose->index == hosenum)
 756                         break;
 757 
 758         if (!hose || !hose->sg_pci)
 759                 return NULL;
 760 
 761         /*
 762          * Allocate the info structure.
 763          */
 764         agp = kmalloc(sizeof(*agp), GFP_KERNEL);
 765         if (!agp)
 766                 return NULL;
 767 
 768         /*
 769          * Fill it in.
 770          */
 771         agp->hose = hose;
 772         agp->private = port;
 773         agp->ops = &titan_agp_ops;
 774 
 775         /*
 776          * Aperture - not configured until ops.setup().
 777          *
 778          * FIXME - should we go ahead and allocate it here?
 779          */
 780         agp->aperture.bus_base = 0;
 781         agp->aperture.size = 0;
 782         agp->aperture.sysdata = NULL;
 783 
 784         /*
 785          * Capabilities.
 786          */
 787         agp->capability.lw = 0;
 788         agp->capability.bits.rate = 3;  /* 2x, 1x */
 789         agp->capability.bits.sba = 1;
 790         agp->capability.bits.rq = 7;    /* 8 - 1 */
 791 
 792         /*
 793          * Mode.
 794          */
 795         pctl.pctl_q_whole = port->pctl.csr;
 796         agp->mode.lw = 0;
 797         agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
 798         agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
 799         agp->mode.bits.rq = 7;  /* RQ Depth? */
 800         agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;
 801 
 802         return agp;
 803 }
