/* via_dma.c -- DMA support for the VIA Unichrome/Pro
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
 * All Rights Reserved.
 *
 * Copyright 2004 The Unichrome project.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Tungsten Graphics,
 *    Erdi Chen,
 *    Thomas Hellstrom.
 */

#include <drm/drmP.h>
#include <drm/via_drm.h>
#include "via_drv.h"
#include "via_3d_reg.h"

#define CMDBUF_ALIGNMENT_SIZE	(0x100)
#define CMDBUF_ALIGNMENT_MASK	(0x0ff)

/* defines for VIA 3D registers */
#define VIA_REG_STATUS		0x400
#define VIA_REG_TRANSET		0x43C
#define VIA_REG_TRANSPACE	0x440

/* VIA_REG_STATUS(0x400): Engine Status */
#define VIA_CMD_RGTR_BUSY	0x00000080	/* Command Regulator is busy */
#define VIA_2D_ENG_BUSY		0x00000001	/* 2D Engine is busy */
#define VIA_3D_ENG_BUSY		0x00000002	/* 3D Engine is busy */
#define VIA_VR_QUEUE_BUSY	0x00020000	/* Virtual Queue is busy */

#define SetReg2DAGP(nReg, nData) {				\
	*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;	\
	*((uint32_t *)(vb) + 1) = (nData);			\
	vb = ((uint32_t *)vb) + 2;				\
	dev_priv->dma_low += 8;					\
}

#define via_flush_write_combine() mb()

#define VIA_OUT_RING_QW(w1, w2)	do {	\
	*vb++ = (w1);			\
	*vb++ = (w2);			\
	dev_priv->dma_low += 8;		\
} while (0)

static void via_cmdbuf_start(drm_via_private_t *dev_priv);
static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
static int via_wait_idle(drm_via_private_t *dev_priv);
static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);

/*
 * Free space in command buffer.
 */

static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

	return ((hw_addr <= dev_priv->dma_low) ?
		(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
		(hw_addr - dev_priv->dma_low));
}
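/*
 * A worked example of the wrap arithmetic above, with made-up numbers:
 * if dma_high = 0x100000, the CPU write pointer dma_low = 0xf0000 and
 * the hardware read pointer hw_addr = 0x10000, then hw_addr <= dma_low
 * and the free space is (0x100000 - 0xf0000) + 0x10000 = 0x20000 bytes:
 * the unused tail of the buffer plus everything the reader has already
 * consumed since wrapping.
 */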
/*
 * How much does the command regulator lag behind?
 */

static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;

	return ((hw_addr <= dev_priv->dma_low) ?
		(dev_priv->dma_low - hw_addr) :
		(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
}

/*
 * Check that the given size fits in the buffer, otherwise wait.
 */

static inline int
via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
{
	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	uint32_t cur_addr, hw_addr, next_addr;
	volatile uint32_t *hw_addr_ptr;
	uint32_t count;
	hw_addr_ptr = dev_priv->hw_addr_ptr;
	cur_addr = dev_priv->dma_low;
	next_addr = cur_addr + size + 512 * 1024;
	count = 1000000;
	do {
		hw_addr = *hw_addr_ptr - agp_base;
		if (count-- == 0) {
			DRM_ERROR
			    ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
			     hw_addr, cur_addr, next_addr);
			return -1;
		}
		if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
			msleep(1);
	} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
	return 0;
}

/*
 * Checks whether the buffer head has reached the end, and rewinds the
 * ring buffer when necessary.
 *
 * Returns a virtual pointer into the ring buffer.
 */

static inline uint32_t *via_check_dma(drm_via_private_t *dev_priv,
				      unsigned int size)
{
	if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
	    dev_priv->dma_high) {
		via_cmdbuf_rewind(dev_priv);
	}
	if (via_cmdbuf_wait(dev_priv, size) != 0)
		return NULL;

	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}

int via_dma_cleanup(struct drm_device *dev)
{
	if (dev->dev_private) {
		drm_via_private_t *dev_priv =
		    (drm_via_private_t *) dev->dev_private;

		if (dev_priv->ring.virtual_start) {
			via_cmdbuf_reset(dev_priv);

			drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
			dev_priv->ring.virtual_start = NULL;
		}

	}

	return 0;
}
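/*
 * For reference, a rough sketch of how userspace (typically the DDX)
 * drives via_initialize() below through the VIA_DMA_INIT ioctl; the
 * offset/size/register values here are placeholders, not real numbers:
 *
 *	drm_via_dma_init_t init = {
 *		.func = VIA_INIT_DMA,
 *		.offset = ring_offset,		// offset into the AGP aperture (placeholder)
 *		.size = ring_size,		// ring size in bytes (placeholder)
 *		.reg_pause_addr = pause_reg,	// MMIO offset of the pause register (placeholder)
 *	};
 *	ioctl(fd, DRM_IOCTL_VIA_DMA_INIT, &init);
 *
 * VIA_CLEANUP_DMA tears the ring down again, and VIA_DMA_INITIALIZED
 * merely queries whether the ring has been set up.
 */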
static int via_initialize(struct drm_device *dev,
			  drm_via_private_t *dev_priv,
			  drm_via_dma_init_t *init)
{
	if (!dev_priv || !dev_priv->mmio) {
		DRM_ERROR("via_dma_init called before via_map_init\n");
		return -EFAULT;
	}

	if (dev_priv->ring.virtual_start != NULL) {
		DRM_ERROR("called again without calling cleanup\n");
		return -EFAULT;
	}

	if (!dev->agp || !dev->agp->base) {
		DRM_ERROR("called with no agp memory available\n");
		return -EFAULT;
	}

	if (dev_priv->chipset == VIA_DX9_0) {
		DRM_ERROR("AGP DMA is not supported on this chip\n");
		return -EINVAL;
	}

	dev_priv->ring.map.offset = dev->agp->base + init->offset;
	dev_priv->ring.map.size = init->size;
	dev_priv->ring.map.type = 0;
	dev_priv->ring.map.flags = 0;
	dev_priv->ring.map.mtrr = 0;

	drm_legacy_ioremap(&dev_priv->ring.map, dev);

	if (dev_priv->ring.map.handle == NULL) {
		via_dma_cleanup(dev);
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->dma_ptr = dev_priv->ring.virtual_start;
	dev_priv->dma_low = 0;
	dev_priv->dma_high = init->size;
	dev_priv->dma_wrap = init->size;
	dev_priv->dma_offset = init->offset;
	dev_priv->last_pause_ptr = NULL;
	dev_priv->hw_addr_ptr =
		(volatile uint32_t *)((char *)dev_priv->mmio->handle +
		init->reg_pause_addr);

	via_cmdbuf_start(dev_priv);

	return 0;
}

static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_dma_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case VIA_INIT_DMA:
		if (!capable(CAP_SYS_ADMIN))
			retcode = -EPERM;
		else
			retcode = via_initialize(dev, dev_priv, init);
		break;
	case VIA_CLEANUP_DMA:
		if (!capable(CAP_SYS_ADMIN))
			retcode = -EPERM;
		else
			retcode = via_dma_cleanup(dev);
		break;
	case VIA_DMA_INITIALIZED:
		retcode = (dev_priv->ring.virtual_start != NULL) ?
			0 : -EFAULT;
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
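/*
 * The AGP submission path below boils down to the following sequence
 * (error handling omitted; see the function body for the real thing):
 *
 *	copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size);
 *	via_verify_command_stream(pci_buf, cmd->size, dev, 1);
 *	vb = via_check_dma(dev_priv, size);	// rewind / wait for space
 *	memcpy(vb, dev_priv->pci_buf, cmd->size);
 *	via_cmdbuf_pause(dev_priv);		// hook segment, park regulator
 */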
static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
{
	drm_via_private_t *dev_priv;
	uint32_t *vb;
	int ret;

	dev_priv = (drm_via_private_t *) dev->dev_private;

	if (dev_priv->ring.virtual_start == NULL) {
		DRM_ERROR("called without initializing AGP ring buffer.\n");
		return -EFAULT;
	}

	if (cmd->size > VIA_PCI_BUF_SIZE)
		return -ENOMEM;

	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
		return -EFAULT;

	/*
	 * Running this function on AGP memory is dead slow. Therefore
	 * we run it on a temporary cacheable system memory buffer and
	 * copy it to AGP memory when ready.
	 */

	if ((ret =
	     via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
				       cmd->size, dev, 1))) {
		return ret;
	}

	vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
	if (vb == NULL)
		return -EAGAIN;

	memcpy(vb, dev_priv->pci_buf, cmd->size);

	dev_priv->dma_low += cmd->size;

	/*
	 * Small submissions somehow stall the CPU (AGP cache effects?);
	 * pad them to a greater size.
	 */

	if (cmd->size < 0x100)
		via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
	via_cmdbuf_pause(dev_priv);

	return 0;
}

int via_driver_dma_quiescent(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = dev->dev_private;

	if (!via_wait_idle(dev_priv))
		return -EBUSY;
	return 0;
}

static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return via_driver_dma_quiescent(dev);
}

static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuffer_t *cmdbuf = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);

	ret = via_dispatch_cmdbuffer(dev, cmdbuf);
	return ret;
}

static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
				      drm_via_cmdbuffer_t *cmd)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	int ret;

	if (cmd->size > VIA_PCI_BUF_SIZE)
		return -ENOMEM;
	if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
		return -EFAULT;

	if ((ret =
	     via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
				       cmd->size, dev, 0))) {
		return ret;
	}

	ret =
	    via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
				     cmd->size);
	return ret;
}

static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuffer_t *cmdbuf = data;
	int ret;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);

	ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
	return ret;
}

static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
					 uint32_t *vb, int qw_count)
{
	for (; qw_count > 0; --qw_count)
		VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
	return vb;
}

/*
 * This function is used internally by ring buffer management code.
 *
 * Returns a virtual pointer into the ring buffer.
 */
static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
{
	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
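/*
 * A rough picture of the ring at the time via_hook_segment() below is
 * called (addresses grow to the right):
 *
 *	dma_ptr         last_pause_ptr              dma_ptr + dma_low
 *	|...consumed... |PAUSE| ...new segment... |PAUSE|
 *
 * The old PAUSE at last_pause_ptr is patched with the address of the
 * new one at the tail, so a still-running regulator flows straight into
 * the fresh segment; only if it has already parked on the old PAUSE
 * does the new address have to be fired over PCI.
 */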
/*
 * Hooks a segment of data into the tail of the ring-buffer by
 * modifying the pause address stored in the buffer itself. If
 * the regulator has already paused, restart it.
 */
static int via_hook_segment(drm_via_private_t *dev_priv,
			    uint32_t pause_addr_hi, uint32_t pause_addr_lo,
			    int no_pci_fire)
{
	int paused, count;
	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
	uint32_t reader, ptr;
	uint32_t diff;

	paused = 0;
	via_flush_write_combine();
	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);

	*paused_at = pause_addr_lo;
	via_flush_write_combine();
	(void) *paused_at;

	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
		dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;

	/*
	 * If there is a possibility that the command reader will
	 * miss the new pause address and pause on the old one,
	 * we need to program the new start address using PCI.
	 */

	diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
	count = 10000000;
	while (diff == 0 && count--) {
		paused = (VIA_READ(0x41c) & 0x80000000);
		if (paused)
			break;
		reader = *(dev_priv->hw_addr_ptr);
		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
	}

	paused = VIA_READ(0x41c) & 0x80000000;

	if (paused && !no_pci_fire) {
		reader = *(dev_priv->hw_addr_ptr);
		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
		diff &= (dev_priv->dma_high - 1);
		if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
			DRM_ERROR("Paused at incorrect address. "
				  "0x%08x, 0x%08x 0x%08x\n",
				  ptr, reader, dev_priv->dma_diff);
		} else if (diff == 0) {
			/*
			 * There is a concern that these writes may stall the PCI bus
			 * if the GPU is not idle. However, idling the GPU first
			 * doesn't make a difference.
			 */

			VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
			VIA_READ(VIA_REG_TRANSPACE);
		}
	}
	return paused;
}

static int via_wait_idle(drm_via_private_t *dev_priv)
{
	int count = 10000000;

	while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
		;

	while (count && (VIA_READ(VIA_REG_STATUS) &
			 (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
			  VIA_3D_ENG_BUSY)))
		--count;
	return count;
}
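/*
 * via_align_cmd() below pads the ring with dummy quadwords so that the
 * JUMP/PAUSE command it emits ends exactly on a CMDBUF_ALIGNMENT_SIZE
 * (0x100 byte) boundary, and encodes the branch target into the
 * HAGPBpL/HAGPBpH register pair; unless an explicit address is passed
 * in, the target is derived from that aligned tail position.
 */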
static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
			       uint32_t addr, uint32_t *cmd_addr_hi,
			       uint32_t *cmd_addr_lo, int skip_wait)
{
	uint32_t agp_base;
	uint32_t cmd_addr, addr_lo, addr_hi;
	uint32_t *vb;
	uint32_t qw_pad_count;

	if (!skip_wait)
		via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);

	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
			(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
		((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);

	cmd_addr = (addr) ? addr :
		agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
		   (cmd_addr & HC_HAGPBpL_MASK));
	addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));

	vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
	VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
	return vb;
}

static void via_cmdbuf_start(drm_via_private_t *dev_priv)
{
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t start_addr, start_addr_lo;
	uint32_t end_addr, end_addr_lo;
	uint32_t command;
	uint32_t agp_base;
	uint32_t ptr;
	uint32_t reader;
	int count;

	dev_priv->dma_low = 0;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	start_addr = agp_base;
	end_addr = agp_base + dev_priv->dma_high;

	start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
	end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
		   ((end_addr & 0xff000000) >> 16));

	dev_priv->last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
			  &pause_addr_hi, &pause_addr_lo, 1) - 1;

	via_flush_write_combine();
	(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;

	VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
	VIA_WRITE(VIA_REG_TRANSPACE, command);
	VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
	VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);

	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
	wmb();
	VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
	VIA_READ(VIA_REG_TRANSPACE);

	dev_priv->dma_diff = 0;

	count = 10000000;
	while (!(VIA_READ(0x41c) & 0x80000000) && count--);

	reader = *(dev_priv->hw_addr_ptr);
	ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
	    dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;

	/*
	 * This is the difference between where we tell the
	 * command reader to pause and where it actually pauses.
	 * This differs between hw implementations, so we need to
	 * detect it.
	 */

	dev_priv->dma_diff = ptr - reader;
}

static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
	uint32_t *vb;

	via_cmdbuf_wait(dev_priv, qwords + 2);
	vb = via_get_dma(dev_priv);
	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
	via_align_buffer(dev_priv, vb, qwords);
}

static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
{
	uint32_t *vb = via_get_dma(dev_priv);
	SetReg2DAGP(0x0C, (0 | (0 << 16)));
	SetReg2DAGP(0x10, 0 | (0 << 16));
	SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
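/*
 * Outline of the wrap sequence implemented by via_cmdbuf_jump() below:
 *
 *  1. Emit a JUMP command at the current tail and record the wrap point
 *     in dma_wrap.
 *  2. Reset dma_low to the buffer start and wait until the regulator
 *     has left the region that is about to be overwritten.
 *  3. Queue two dummy bitblts and two chained PAUSE pairs; one pair
 *     serves as a trap for a regulator that reruns the old buffer after
 *     the JUMP was redundantly re-fired over PCI (see the comment in
 *     the body).
 *  4. Hook the JUMP into the old tail, then hook the final PAUSE.
 */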
static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
{
	uint32_t agp_base;
	uint32_t pause_addr_lo, pause_addr_hi;
	uint32_t jump_addr_lo, jump_addr_hi;
	volatile uint32_t *last_pause_ptr;
	uint32_t dma_low_save1, dma_low_save2;

	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
		      &jump_addr_lo, 0);

	dev_priv->dma_wrap = dev_priv->dma_low;

	/*
	 * Wrap command buffer to the beginning.
	 */

	dev_priv->dma_low = 0;
	if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
		DRM_ERROR("via_cmdbuf_jump failed\n");

	via_dummy_bitblt(dev_priv);
	via_dummy_bitblt(dev_priv);

	last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
			  &pause_addr_lo, 0) - 1;
	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);

	*last_pause_ptr = pause_addr_lo;
	dma_low_save1 = dev_priv->dma_low;

	/*
	 * Now, set a trap that will pause the regulator if it tries to rerun
	 * the old command buffer. (This may happen if via_hook_segment detects
	 * a command regulator pause and reissues the jump command over PCI,
	 * while the regulator has already taken the jump and actually paused
	 * at the current buffer end.)
	 * There appears to be no other way to detect this condition, since the
	 * hw_addr_pointer does not seem to get updated immediately when a jump
	 * occurs.
	 */

	last_pause_ptr =
	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
			  &pause_addr_lo, 0) - 1;
	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
		      &pause_addr_lo, 0);
	*last_pause_ptr = pause_addr_lo;

	dma_low_save2 = dev_priv->dma_low;
	dev_priv->dma_low = dma_low_save1;
	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
	dev_priv->dma_low = dma_low_save2;
	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
{
	via_cmdbuf_jump(dev_priv);
}

static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
{
	uint32_t pause_addr_lo, pause_addr_hi;

	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}

static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}

static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
{
	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
	via_wait_idle(dev_priv);
}
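/*
 * Note the asymmetry above: via_cmdbuf_pause() parks the regulator on a
 * PAUSE command that a later via_hook_segment() can patch to resume,
 * whereas via_cmdbuf_reset() issues a STOP and then spins in
 * via_wait_idle(), which is why it is only used on the cleanup path.
 */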
/*
 * User interface to the space and lag functions.
 */

static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_cmdbuf_size_t *d_siz = data;
	int ret = 0;
	uint32_t tmp_size, count;
	drm_via_private_t *dev_priv;

	DRM_DEBUG("\n");
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	dev_priv = (drm_via_private_t *) dev->dev_private;

	if (dev_priv->ring.virtual_start == NULL) {
		DRM_ERROR("called without initializing AGP ring buffer.\n");
		return -EFAULT;
	}

	count = 1000000;
	tmp_size = d_siz->size;
	switch (d_siz->func) {
	case VIA_CMDBUF_SPACE:
		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
		       && --count) {
			if (!d_siz->wait)
				break;
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
			ret = -EAGAIN;
		}
		break;
	case VIA_CMDBUF_LAG:
		while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
		       && --count) {
			if (!d_siz->wait)
				break;
		}
		if (!count) {
			DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
			ret = -EAGAIN;
		}
		break;
	default:
		ret = -EFAULT;
	}
	d_siz->size = tmp_size;

	return ret;
}

const struct drm_ioctl_desc via_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};

int via_max_ioctl = ARRAY_SIZE(via_ioctls);