drivers/video/fbdev/via/via-core.c


DEFINITIONS

This source file includes the following definitions.
  1. viafb_mmio_write
  2. viafb_mmio_read
  3. viafb_int_init
  4. viafb_irq_enable
  5. viafb_irq_disable
  6. viafb_dma_irq
  7. viafb_request_dma
  8. viafb_release_dma
  9. viafb_dma_copy_out_sg
  10. viafb_get_fb_size_from_pci
  11. via_pci_setup_mmio
  12. via_pci_teardown_mmio
  13. via_create_subdev
  14. via_setup_subdevs
  15. via_teardown_subdevs
  16. viafb_pm_register
  17. viafb_pm_unregister
  18. via_suspend
  19. via_resume
  20. via_pci_probe
  21. via_pci_remove
  22. via_core_init
  23. via_core_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved.
 * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
 * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
 */

/*
 * Core code for the Via multifunction framebuffer device.
 */
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include <linux/via-gpio.h>
#include "global.h"

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/pm.h>

/*
 * The default port config.
 */
static struct via_port_cfg adap_configs[] = {
        [VIA_PORT_26]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x26 },
        [VIA_PORT_31]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x31 },
        [VIA_PORT_25]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
        [VIA_PORT_2C]   = { VIA_PORT_GPIO, VIA_MODE_I2C, VIASR, 0x2c },
        [VIA_PORT_3D]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
        { 0, 0, 0, 0 }
};

/*
 * The OLPC XO-1.5 puts the camera power and reset lines onto
 * GPIO 2C.
 */
static struct via_port_cfg olpc_adap_configs[] = {
        [VIA_PORT_26]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x26 },
        [VIA_PORT_31]   = { VIA_PORT_I2C,  VIA_MODE_I2C, VIASR, 0x31 },
        [VIA_PORT_25]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x25 },
        [VIA_PORT_2C]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x2c },
        [VIA_PORT_3D]   = { VIA_PORT_GPIO, VIA_MODE_GPIO, VIASR, 0x3d },
        { 0, 0, 0, 0 }
};

/*
 * We currently only support one viafb device (will there ever be
 * more than one?), so just declare it globally here.
 */
static struct viafb_dev global_dev;


/*
 * Basic register access; spinlock required.
 */
static inline void viafb_mmio_write(int reg, u32 v)
{
        iowrite32(v, global_dev.engine_mmio + reg);
}

static inline int viafb_mmio_read(int reg)
{
        return ioread32(global_dev.engine_mmio + reg);
}
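
/*
 * Illustrative sketch (editor addition, not part of via-core.c): the
 * access pattern the "spinlock required" comment above asks for.  Code
 * elsewhere in the driver reaches the engine registers through
 * vdev->engine_mmio with vdev->reg_lock held; EXAMPLE_REG and
 * EXAMPLE_BIT are hypothetical names, and the block is compiled out
 * since it is illustration only.
 */
#if 0
static void example_set_engine_bit(struct viafb_dev *vdev)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&vdev->reg_lock, flags);
        val = ioread32(vdev->engine_mmio + EXAMPLE_REG);
        iowrite32(val | EXAMPLE_BIT, vdev->engine_mmio + EXAMPLE_REG);
        spin_unlock_irqrestore(&vdev->reg_lock, flags);
}
#endif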

/* ---------------------------------------------------------------------- */
/*
 * Interrupt management.  We have a single IRQ line for a lot of
 * different functions, so we need to share it.  The design here
 * is that we don't want to reimplement the shared IRQ code here;
 * we also want to avoid having contention for a single handler thread.
 * So each subdev driver which needs interrupts just requests
 * them directly from the kernel.  We just have what's needed for
 * overall access to the interrupt control register.
 */

/*
 * Which interrupts are enabled now?
 */
static u32 viafb_enabled_ints;

static void viafb_int_init(void)
{
        viafb_enabled_ints = 0;

        viafb_mmio_write(VDE_INTERRUPT, 0);
}

/*
 * Allow subdevs to ask for specific interrupts to be enabled.  These
 * functions must be called with reg_lock held.
 */
void viafb_irq_enable(u32 mask)
{
        viafb_enabled_ints |= mask;
        viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_enable);

void viafb_irq_disable(u32 mask)
{
        viafb_enabled_ints &= ~mask;
        if (viafb_enabled_ints == 0)
                viafb_mmio_write(VDE_INTERRUPT, 0);  /* Disable entirely */
        else
                viafb_mmio_write(VDE_INTERRUPT,
                                viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_disable);
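
/*
 * Illustrative sketch (editor addition, not part of via-core.c): how a
 * subdevice driver is expected to use the two helpers above.  It
 * registers its own shared handler for the graphics IRQ directly with
 * the kernel and then asks the core to turn on the status bit it cares
 * about, holding reg_lock as required.  EXAMPLE_VDE_I_BIT and the
 * handler argument are hypothetical; the block is compiled out.
 */
#if 0
static int example_subdev_irq_setup(struct viafb_dev *vdev,
                                    irq_handler_t handler, void *data)
{
        unsigned long flags;
        int ret;

        ret = request_irq(vdev->pdev->irq, handler, IRQF_SHARED,
                          "viafb-example", data);
        if (ret)
                return ret;
        spin_lock_irqsave(&vdev->reg_lock, flags);
        viafb_irq_enable(EXAMPLE_VDE_I_BIT);
        spin_unlock_irqrestore(&vdev->reg_lock, flags);
        return 0;
}
#endif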

/* ---------------------------------------------------------------------- */
/*
 * Currently, the camera driver is the only user of the DMA code, so we
 * only compile it in if the camera driver is being built.  Chances are,
 * most viafb systems will not need to have this extra code for a while.
 * As soon as another user comes along, the ifdef can be removed.
 */
#if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA)
/*
 * Access to the DMA engine.  This currently provides what the camera
 * driver needs (i.e. outgoing only) but is easily expandable if need
 * be.
 */

/*
 * There are four DMA channels in the vx855.  For now, we only
 * use one of them, though.  Most of the time, the DMA channel
 * will be idle, so we keep the IRQ handler unregistered except
 * when some subsystem has indicated an interest.
 */
static int viafb_dma_users;
static DECLARE_COMPLETION(viafb_dma_completion);
/*
 * This mutex protects viafb_dma_users and our global interrupt
 * registration state; it also serializes access to the DMA
 * engine.
 */
static DEFINE_MUTEX(viafb_dma_lock);

/*
 * The VX855 DMA descriptor (used for s/g transfers) looks
 * like this.
 */
struct viafb_vx855_dma_descr {
        u32     addr_low;       /* Low part of phys addr */
        u32     addr_high;      /* High 12 bits of addr */
        u32     fb_offset;      /* Offset into FB memory */
        u32     seg_size;       /* Size, 16-byte units */
        u32     tile_mode;      /* "tile mode" setting */
        u32     next_desc_low;  /* Next descriptor addr */
        u32     next_desc_high;
        u32     pad;            /* Fill out to 64 bytes */
};

/*
 * Flags added to the "next descriptor low" pointers
 */
#define VIAFB_DMA_MAGIC         0x01  /* ??? Just has to be there */
#define VIAFB_DMA_FINAL_SEGMENT 0x02  /* Final segment */
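
/*
 * Editor's note (not part of via-core.c): the descriptor format only
 * stores physical addresses with the low four bits masked off and
 * segment sizes in 16-byte units (see the fill loop in
 * viafb_dma_copy_out_sg() below), so callers should hand in
 * scatterlists whose segments are 16-byte aligned in both address and
 * length.  The helper below is a hypothetical sketch of that rule,
 * compiled out since it is illustration only.
 */
#if 0
static bool example_sg_is_dma_aligned(struct scatterlist *sg, int nsg)
{
        struct scatterlist *sgentry;
        int i;

        for_each_sg(sg, sgentry, nsg, i)
                if ((sg_dma_address(sgentry) & 0x0f) ||
                    (sg_dma_len(sgentry) & 0x0f))
                        return false;
        return true;
}
#endif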

/*
 * The completion IRQ handler.
 */
static irqreturn_t viafb_dma_irq(int irq, void *data)
{
        int csr;
        irqreturn_t ret = IRQ_NONE;

        spin_lock(&global_dev.reg_lock);
        csr = viafb_mmio_read(VDMA_CSR0);
        if (csr & VDMA_C_DONE) {
                viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
                complete(&viafb_dma_completion);
                ret = IRQ_HANDLED;
        }
        spin_unlock(&global_dev.reg_lock);
        return ret;
}

/*
 * Indicate a need for DMA functionality.
 */
int viafb_request_dma(void)
{
        int ret = 0;

        /*
         * Only VX855 is supported currently.
         */
        if (global_dev.chip_type != UNICHROME_VX855)
                return -ENODEV;
        /*
         * Note the new user and set up our interrupt handler
         * if need be.
         */
        mutex_lock(&viafb_dma_lock);
        viafb_dma_users++;
        if (viafb_dma_users == 1) {
                ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
                                IRQF_SHARED, "via-dma", &viafb_dma_users);
                if (ret)
                        viafb_dma_users--;
                else
                        viafb_irq_enable(VDE_I_DMA0TDEN);
        }
        mutex_unlock(&viafb_dma_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(viafb_request_dma);

void viafb_release_dma(void)
{
        mutex_lock(&viafb_dma_lock);
        viafb_dma_users--;
        if (viafb_dma_users == 0) {
                viafb_irq_disable(VDE_I_DMA0TDEN);
                free_irq(global_dev.pdev->irq, &viafb_dma_users);
        }
        mutex_unlock(&viafb_dma_lock);
}
EXPORT_SYMBOL_GPL(viafb_release_dma);

/*
 * Do a scatter/gather DMA copy from FB memory.  You must have done
 * a successful call to viafb_request_dma() first.
 */
int viafb_dma_copy_out_sg(unsigned int offset, struct scatterlist *sg, int nsg)
{
        struct viafb_vx855_dma_descr *descr;
        void *descrpages;
        dma_addr_t descr_handle;
        unsigned long flags;
        int i;
        struct scatterlist *sgentry;
        dma_addr_t nextdesc;

        /*
         * Get a place to put the descriptors.
         */
        descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
                        nsg*sizeof(struct viafb_vx855_dma_descr),
                        &descr_handle, GFP_KERNEL);
        if (descrpages == NULL) {
                dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n");
                return -ENOMEM;
        }
        mutex_lock(&viafb_dma_lock);
        /*
         * Fill them in.
         */
        descr = descrpages;
        nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
        for_each_sg(sg, sgentry, nsg, i) {
                dma_addr_t paddr = sg_dma_address(sgentry);
                descr->addr_low = paddr & 0xfffffff0;
                descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
                descr->fb_offset = offset;
                descr->seg_size = sg_dma_len(sgentry) >> 4;
                descr->tile_mode = 0;
                descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
                descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
                descr->pad = 0xffffffff;  /* VIA driver does this */
                offset += sg_dma_len(sgentry);
                nextdesc += sizeof(struct viafb_vx855_dma_descr);
                descr++;
        }
        descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC;
        /*
         * Program the engine.
         */
        spin_lock_irqsave(&global_dev.reg_lock, flags);
        init_completion(&viafb_dma_completion);
        viafb_mmio_write(VDMA_DQWCR0, 0);
        viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
        viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
        viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
        viafb_mmio_write(VDMA_DPRH0,
                        (((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
        (void) viafb_mmio_read(VDMA_CSR0);
        viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
        spin_unlock_irqrestore(&global_dev.reg_lock, flags);
        /*
         * Now we just wait until the interrupt handler says
         * we're done.  Except that, actually, we need to wait a little
         * longer: the interrupts seem to jump the gun a little and we
         * get corrupted frames sometimes.
         */
        wait_for_completion_timeout(&viafb_dma_completion, 1);
        msleep(1);
        if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
                printk(KERN_ERR "VIA DMA timeout!\n");
        /*
         * Clean up and we're done.
         */
        viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
        viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
        mutex_unlock(&viafb_dma_lock);
        dma_free_coherent(&global_dev.pdev->dev,
                        nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
                        descr_handle);
        return 0;
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg);
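
/*
 * Illustrative sketch (editor addition, not part of via-core.c): the
 * calling sequence expected of a DMA user such as the camera driver.
 * Only the viafb_*() helpers above and dma_map_sg()/dma_unmap_sg() are
 * real interfaces; the function, its buffer handling and error policy
 * are hypothetical, and the block is compiled out.
 */
#if 0
static int example_copy_frame(struct viafb_dev *vdev, unsigned int fb_offset,
                              struct scatterlist *sg, int nsg)
{
        int nmapped, ret;

        ret = viafb_request_dma();      /* count a user, hook up the IRQ */
        if (ret)
                return ret;
        nmapped = dma_map_sg(&vdev->pdev->dev, sg, nsg, DMA_FROM_DEVICE);
        if (nmapped == 0) {
                ret = -EIO;
                goto out_release;
        }
        ret = viafb_dma_copy_out_sg(fb_offset, sg, nmapped);
        dma_unmap_sg(&vdev->pdev->dev, sg, nsg, DMA_FROM_DEVICE);
out_release:
        viafb_release_dma();
        return ret;
}
#endif
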
#endif /* CONFIG_VIDEO_VIA_CAMERA */

/* ---------------------------------------------------------------------- */
/*
 * Figure out how big our framebuffer memory is.  Kind of ugly,
 * but evidently we can't trust the information found in the
 * fbdev configuration area.
 */
static u16 via_function3[] = {
        CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
        CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
        P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3, VX900_FUNCTION3,
};

/* Get the BIOS-configured framebuffer size from PCI configuration space
 * of function 3 in the respective chipset */
static int viafb_get_fb_size_from_pci(int chip_type)
{
        int i;
        u8 offset = 0;
        u32 FBSize;
        u32 VideoMemSize;

        /* search for the "FUNCTION3" device in this chipset */
        for (i = 0; i < ARRAY_SIZE(via_function3); i++) {
                struct pci_dev *pdev;

                pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
                                      NULL);
                if (!pdev)
                        continue;

                DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);

                switch (pdev->device) {
                case CLE266_FUNCTION3:
                case KM400_FUNCTION3:
                        offset = 0xE0;
                        break;
                case CN400_FUNCTION3:
                case CN700_FUNCTION3:
                case CX700_FUNCTION3:
                case KM800_FUNCTION3:
                case KM890_FUNCTION3:
                case P4M890_FUNCTION3:
                case P4M900_FUNCTION3:
                case VX800_FUNCTION3:
                case VX855_FUNCTION3:
                case VX900_FUNCTION3:
                /*case CN750_FUNCTION3: */
                        offset = 0xA0;
                        break;
                }

                if (!offset)
                        break;

                pci_read_config_dword(pdev, offset, &FBSize);
                pci_dev_put(pdev);
        }

        if (!offset) {
                printk(KERN_ERR "cannot determine framebuffer size\n");
                return -EIO;
        }

        FBSize = FBSize & 0x00007000;
        DEBUG_MSG(KERN_INFO "FB Size = %x\n", FBSize);

        if (chip_type < UNICHROME_CX700) {
                switch (FBSize) {
                case 0x00004000:
                        VideoMemSize = (16 << 20);      /*16M */
                        break;

                case 0x00005000:
                        VideoMemSize = (32 << 20);      /*32M */
                        break;

                case 0x00006000:
                        VideoMemSize = (64 << 20);      /*64M */
                        break;

                default:
                        VideoMemSize = (32 << 20);      /*32M */
                        break;
                }
        } else {
                switch (FBSize) {
                case 0x00001000:
                        VideoMemSize = (8 << 20);       /*8M */
                        break;

                case 0x00002000:
                        VideoMemSize = (16 << 20);      /*16M */
                        break;

                case 0x00003000:
                        VideoMemSize = (32 << 20);      /*32M */
                        break;

                case 0x00004000:
                        VideoMemSize = (64 << 20);      /*64M */
                        break;

                case 0x00005000:
                        VideoMemSize = (128 << 20);     /*128M */
                        break;

                case 0x00006000:
                        VideoMemSize = (256 << 20);     /*256M */
                        break;

                case 0x00007000:        /* Only on VX855/875 */
                        VideoMemSize = (512 << 20);     /*512M */
                        break;

                default:
                        VideoMemSize = (32 << 20);      /*32M */
                        break;
                }
        }

        return VideoMemSize;
}


/*
 * Figure out and map our MMIO regions.
 */
static int via_pci_setup_mmio(struct viafb_dev *vdev)
{
        int ret;
        /*
         * Hook up to the device registers.  Note that we soldier
         * on if it fails; the framebuffer can operate (without
         * acceleration) without this region.
         */
        vdev->engine_start = pci_resource_start(vdev->pdev, 1);
        vdev->engine_len = pci_resource_len(vdev->pdev, 1);
        vdev->engine_mmio = ioremap_nocache(vdev->engine_start,
                        vdev->engine_len);
        if (vdev->engine_mmio == NULL)
                dev_err(&vdev->pdev->dev,
                                "Unable to map engine MMIO; operation will be "
                                "slow and crippled.\n");
        /*
         * Map in framebuffer memory.  For now, failure here is
         * fatal.  Unfortunately, in the absence of significant
         * vmalloc space, failure here is also entirely plausible.
         * Eventually we want to move away from mapping this
         * entire region.
         */
        if (vdev->chip_type == UNICHROME_VX900)
                vdev->fbmem_start = pci_resource_start(vdev->pdev, 2);
        else
                vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
        ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
        if (ret < 0)
                goto out_unmap;

        /* try to map less memory on failure, 8 MB should still be enough */
        for (; vdev->fbmem_len >= 8 << 20; vdev->fbmem_len /= 2) {
                vdev->fbmem = ioremap_wc(vdev->fbmem_start, vdev->fbmem_len);
                if (vdev->fbmem)
                        break;
        }

        if (vdev->fbmem == NULL) {
                ret = -ENOMEM;
                goto out_unmap;
        }
        return 0;
out_unmap:
        iounmap(vdev->engine_mmio);
        return ret;
}

static void via_pci_teardown_mmio(struct viafb_dev *vdev)
{
        iounmap(vdev->fbmem);
        iounmap(vdev->engine_mmio);
}

/*
 * Create our subsidiary devices.
 */
static struct viafb_subdev_info {
        char *name;
        struct platform_device *platdev;
} viafb_subdevs[] = {
        {
                .name = "viafb-gpio",
        },
        {
                .name = "viafb-i2c",
        },
#if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA)
        {
                .name = "viafb-camera",
        },
#endif
};
#define N_SUBDEVS ARRAY_SIZE(viafb_subdevs)

static int via_create_subdev(struct viafb_dev *vdev,
                             struct viafb_subdev_info *info)
{
        int ret;

        info->platdev = platform_device_alloc(info->name, -1);
        if (!info->platdev) {
                dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
                        info->name);
                return -ENOMEM;
        }
        info->platdev->dev.parent = &vdev->pdev->dev;
        info->platdev->dev.platform_data = vdev;
        ret = platform_device_add(info->platdev);
        if (ret) {
                dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
                                info->name);
                platform_device_put(info->platdev);
                info->platdev = NULL;
        }
        return ret;
}
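
/*
 * Illustrative sketch (editor addition, not part of via-core.c): the
 * subdevice side of the platform_data hand-off above.  A subdev's
 * platform driver probe() recovers the shared viafb_dev with
 * dev_get_platdata(); the driver name must match one of the
 * viafb_subdevs[] entries.  The probe body is hypothetical and the
 * block is compiled out.
 */
#if 0
static int example_subdev_probe(struct platform_device *platdev)
{
        struct viafb_dev *vdev = dev_get_platdata(&platdev->dev);

        /* vdev->engine_mmio, vdev->pdev, etc. are now usable here */
        return 0;
}

static struct platform_driver example_subdev_driver = {
        .probe  = example_subdev_probe,
        .driver = {
                .name = "viafb-gpio",   /* matches a viafb_subdevs[] name */
        },
};
#endif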

static int via_setup_subdevs(struct viafb_dev *vdev)
{
        int i;

        /*
         * Ignore return values.  Even if some of the devices
         * fail to be created, we'll still be able to use some
         * of the rest.
         */
        for (i = 0; i < N_SUBDEVS; i++)
                via_create_subdev(vdev, viafb_subdevs + i);
        return 0;
}

static void via_teardown_subdevs(void)
{
        int i;

        for (i = 0; i < N_SUBDEVS; i++)
                if (viafb_subdevs[i].platdev) {
                        viafb_subdevs[i].platdev->dev.platform_data = NULL;
                        platform_device_unregister(viafb_subdevs[i].platdev);
                }
}

/*
 * Power management functions
 */
#ifdef CONFIG_PM
static LIST_HEAD(viafb_pm_hooks);
static DEFINE_MUTEX(viafb_pm_hooks_lock);

void viafb_pm_register(struct viafb_pm_hooks *hooks)
{
        INIT_LIST_HEAD(&hooks->list);

        mutex_lock(&viafb_pm_hooks_lock);
        list_add_tail(&hooks->list, &viafb_pm_hooks);
        mutex_unlock(&viafb_pm_hooks_lock);
}
EXPORT_SYMBOL_GPL(viafb_pm_register);

void viafb_pm_unregister(struct viafb_pm_hooks *hooks)
{
        mutex_lock(&viafb_pm_hooks_lock);
        list_del(&hooks->list);
        mutex_unlock(&viafb_pm_hooks_lock);
}
EXPORT_SYMBOL_GPL(viafb_pm_unregister);
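
/*
 * Illustrative sketch (editor addition, not part of via-core.c): how a
 * subdevice plugs into the suspend/resume path.  The struct fields
 * (suspend, resume, private, list) follow their use in via_suspend()
 * and via_resume() below; the int (*)(void *) callback prototypes are
 * assumed from via-core.h, and everything named example_* is
 * hypothetical.  Compiled out since it is illustration only.
 */
#if 0
static int example_pm_suspend(void *priv)
{
        /* quiesce the subdevice hardware; return value is ignored */
        return 0;
}

static int example_pm_resume(void *priv)
{
        /* bring the subdevice hardware back up */
        return 0;
}

static struct viafb_pm_hooks example_pm_hooks = {
        .suspend = example_pm_suspend,
        .resume  = example_pm_resume,
        .private = NULL,        /* would point at the subdevice's state */
};

static void example_pm_setup(void)
{
        viafb_pm_register(&example_pm_hooks);   /* typically from probe() */
}

static void example_pm_teardown(void)
{
        viafb_pm_unregister(&example_pm_hooks); /* typically from remove() */
}
#endif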

static int via_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct viafb_pm_hooks *hooks;

        if (state.event != PM_EVENT_SUSPEND)
                return 0;
        /*
         * "I've occasionally hit a few drivers that caused suspend
         * failures, and each and every time it was a driver bug, and
         * the right thing to do was to just ignore the error and suspend
         * anyway - returning an error code and trying to undo the suspend
         * is not what anybody ever really wants, even if our model
         * _allows_ for it."
         * -- Linus Torvalds, Dec. 7, 2009
         */
        mutex_lock(&viafb_pm_hooks_lock);
        list_for_each_entry_reverse(hooks, &viafb_pm_hooks, list)
                hooks->suspend(hooks->private);
        mutex_unlock(&viafb_pm_hooks_lock);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int via_resume(struct pci_dev *pdev)
{
        struct viafb_pm_hooks *hooks;

        /* Get the bus side powered up */
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        if (pci_enable_device(pdev))
                return 0;

        pci_set_master(pdev);

        /* Now bring back any subdevs */
        mutex_lock(&viafb_pm_hooks_lock);
        list_for_each_entry(hooks, &viafb_pm_hooks, list)
                hooks->resume(hooks->private);
        mutex_unlock(&viafb_pm_hooks_lock);

        return 0;
}
#endif /* CONFIG_PM */

static int via_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int ret;

        ret = pci_enable_device(pdev);
        if (ret)
                return ret;

        /*
         * Global device initialization.
         */
        memset(&global_dev, 0, sizeof(global_dev));
        global_dev.pdev = pdev;
        global_dev.chip_type = ent->driver_data;
        global_dev.port_cfg = adap_configs;
        if (machine_is_olpc())
                global_dev.port_cfg = olpc_adap_configs;

        spin_lock_init(&global_dev.reg_lock);
        ret = via_pci_setup_mmio(&global_dev);
        if (ret)
                goto out_disable;
        /*
         * Set up interrupts and create our subdevices.  Continue even if
         * some things fail.
         */
        viafb_int_init();
        via_setup_subdevs(&global_dev);
        /*
         * Set up the framebuffer device
         */
        ret = via_fb_pci_probe(&global_dev);
        if (ret)
                goto out_subdevs;
        return 0;

out_subdevs:
        via_teardown_subdevs();
        via_pci_teardown_mmio(&global_dev);
out_disable:
        pci_disable_device(pdev);
        return ret;
}

static void via_pci_remove(struct pci_dev *pdev)
{
        via_teardown_subdevs();
        via_fb_pci_remove(pdev);
        via_pci_teardown_mmio(&global_dev);
        pci_disable_device(pdev);
}


static const struct pci_device_id via_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CLE266_DID),
          .driver_data = UNICHROME_CLE266 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K400_DID),
          .driver_data = UNICHROME_K400 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K800_DID),
          .driver_data = UNICHROME_K800 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_PM800_DID),
          .driver_data = UNICHROME_PM800 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN700_DID),
          .driver_data = UNICHROME_CN700 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CX700_DID),
          .driver_data = UNICHROME_CX700 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_CN750_DID),
          .driver_data = UNICHROME_CN750 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_K8M890_DID),
          .driver_data = UNICHROME_K8M890 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M890_DID),
          .driver_data = UNICHROME_P4M890 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_P4M900_DID),
          .driver_data = UNICHROME_P4M900 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX800_DID),
          .driver_data = UNICHROME_VX800 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX855_DID),
          .driver_data = UNICHROME_VX855 },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, UNICHROME_VX900_DID),
          .driver_data = UNICHROME_VX900 },
        { }
};
MODULE_DEVICE_TABLE(pci, via_pci_table);

static struct pci_driver via_driver = {
        .name           = "viafb",
        .id_table       = via_pci_table,
        .probe          = via_pci_probe,
        .remove         = via_pci_remove,
#ifdef CONFIG_PM
        .suspend        = via_suspend,
        .resume         = via_resume,
#endif
};

static int __init via_core_init(void)
{
        int ret;

        ret = viafb_init();
        if (ret)
                return ret;
        viafb_i2c_init();
        viafb_gpio_init();
        return pci_register_driver(&via_driver);
}

static void __exit via_core_exit(void)
{
        pci_unregister_driver(&via_driver);
        viafb_gpio_exit();
        viafb_i2c_exit();
        viafb_exit();
}

module_init(via_core_init);
module_exit(via_core_exit);
