1/* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 */ 28#include <drm/drmP.h> 29#include "radeon.h" 30#include <drm/radeon_drm.h> 31#include "radeon_asic.h" 32 33#include <linux/vga_switcheroo.h> 34#include <linux/slab.h> 35#include <linux/pm_runtime.h> 36 37#include "radeon_kfd.h" 38 39#if defined(CONFIG_VGA_SWITCHEROO) 40bool radeon_has_atpx(void); 41#else 42static inline bool radeon_has_atpx(void) { return false; } 43#endif 44 45/** 46 * radeon_driver_unload_kms - Main unload function for KMS. 47 * 48 * @dev: drm dev pointer 49 * 50 * This is the main unload function for KMS (all asics). 
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 * Returns 0 on success.
 */
int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	/* Load never allocated a radeon_device: nothing to tear down. */
	if (rdev == NULL)
		return 0;

	/* MMIO was never mapped, so radeon_device_init() did not get far
	 * enough for a full teardown; only the rdev allocation is live. */
	if (rdev->rmmio == NULL)
		goto done_free;

	/* Wake the GPU so the fini paths below can touch the hardware. */
	pm_runtime_get_sync(dev->dev);

	radeon_kfd_device_fini(rdev);

	radeon_acpi_fini(rdev);
	
	/* Displays first, then the core device (reverse of load order). */
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}

/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* PX (PowerXpress) runtime PM only applies to discrete GPUs with
	 * ATPX support; IGPs are excluded. */
	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0))
		flags |= RADEON_IS_PX;

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = radeon_acpi_init(rdev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	radeon_kfd_device_probe(rdev);
	radeon_kfd_device_init(rdev);

	/* Hand the device over to runtime PM: allow autosuspend after 5s
	 * idle and drop the initial usage count taken at probe time. */
	if (radeon_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	/* On fatal error, unload tears down whatever init completed. */
	if (r)
		radeon_driver_unload_kms(dev);


	return r;
}
168 169/** 170 * radeon_set_filp_rights - Set filp right. 171 * 172 * @dev: drm dev pointer 173 * @owner: drm file 174 * @applier: drm file 175 * @value: value 176 * 177 * Sets the filp rights for the device (all asics). 178 */ 179static void radeon_set_filp_rights(struct drm_device *dev, 180 struct drm_file **owner, 181 struct drm_file *applier, 182 uint32_t *value) 183{ 184 struct radeon_device *rdev = dev->dev_private; 185 186 mutex_lock(&rdev->gem.mutex); 187 if (*value == 1) { 188 /* wants rights */ 189 if (!*owner) 190 *owner = applier; 191 } else if (*value == 0) { 192 /* revokes rights */ 193 if (*owner == applier) 194 *owner = NULL; 195 } 196 *value = *owner == applier ? 1 : 0; 197 mutex_unlock(&rdev->gem.mutex); 198} 199 200/* 201 * Userspace get information ioctl 202 */ 203/** 204 * radeon_info_ioctl - answer a device specific request. 205 * 206 * @rdev: radeon device pointer 207 * @data: request object 208 * @filp: drm filp 209 * 210 * This function is used to pass device specific parameters to the userspace 211 * drivers. Examples include: pci device id, pipeline parms, tiling params, 212 * etc. (all asics). 213 * Returns 0 on success, -EINVAL on failure. 
214 */ 215static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 216{ 217 struct radeon_device *rdev = dev->dev_private; 218 struct drm_radeon_info *info = data; 219 struct radeon_mode_info *minfo = &rdev->mode_info; 220 uint32_t *value, value_tmp, *value_ptr, value_size; 221 uint64_t value64; 222 struct drm_crtc *crtc; 223 int i, found; 224 225 value_ptr = (uint32_t *)((unsigned long)info->value); 226 value = &value_tmp; 227 value_size = sizeof(uint32_t); 228 229 switch (info->request) { 230 case RADEON_INFO_DEVICE_ID: 231 *value = dev->pdev->device; 232 break; 233 case RADEON_INFO_NUM_GB_PIPES: 234 *value = rdev->num_gb_pipes; 235 break; 236 case RADEON_INFO_NUM_Z_PIPES: 237 *value = rdev->num_z_pipes; 238 break; 239 case RADEON_INFO_ACCEL_WORKING: 240 /* xf86-video-ati 6.13.0 relies on this being false for evergreen */ 241 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) 242 *value = false; 243 else 244 *value = rdev->accel_working; 245 break; 246 case RADEON_INFO_CRTC_FROM_ID: 247 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 248 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 249 return -EFAULT; 250 } 251 for (i = 0, found = 0; i < rdev->num_crtc; i++) { 252 crtc = (struct drm_crtc *)minfo->crtcs[i]; 253 if (crtc && crtc->base.id == *value) { 254 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 255 *value = radeon_crtc->crtc_id; 256 found = 1; 257 break; 258 } 259 } 260 if (!found) { 261 DRM_DEBUG_KMS("unknown crtc id %d\n", *value); 262 return -EINVAL; 263 } 264 break; 265 case RADEON_INFO_ACCEL_WORKING2: 266 if (rdev->family == CHIP_HAWAII) { 267 if (rdev->accel_working) { 268 if (rdev->new_fw) 269 *value = 3; 270 else 271 *value = 2; 272 } else { 273 *value = 0; 274 } 275 } else { 276 *value = rdev->accel_working; 277 } 278 break; 279 case RADEON_INFO_TILING_CONFIG: 280 if (rdev->family >= CHIP_BONAIRE) 281 *value = rdev->config.cik.tile_config; 282 else if (rdev->family >= 
CHIP_TAHITI) 283 *value = rdev->config.si.tile_config; 284 else if (rdev->family >= CHIP_CAYMAN) 285 *value = rdev->config.cayman.tile_config; 286 else if (rdev->family >= CHIP_CEDAR) 287 *value = rdev->config.evergreen.tile_config; 288 else if (rdev->family >= CHIP_RV770) 289 *value = rdev->config.rv770.tile_config; 290 else if (rdev->family >= CHIP_R600) 291 *value = rdev->config.r600.tile_config; 292 else { 293 DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); 294 return -EINVAL; 295 } 296 break; 297 case RADEON_INFO_WANT_HYPERZ: 298 /* The "value" here is both an input and output parameter. 299 * If the input value is 1, filp requests hyper-z access. 300 * If the input value is 0, filp revokes its hyper-z access. 301 * 302 * When returning, the value is 1 if filp owns hyper-z access, 303 * 0 otherwise. */ 304 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 305 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 306 return -EFAULT; 307 } 308 if (*value >= 2) { 309 DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value); 310 return -EINVAL; 311 } 312 radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value); 313 break; 314 case RADEON_INFO_WANT_CMASK: 315 /* The same logic as Hyper-Z. 
*/ 316 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 317 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 318 return -EFAULT; 319 } 320 if (*value >= 2) { 321 DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value); 322 return -EINVAL; 323 } 324 radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value); 325 break; 326 case RADEON_INFO_CLOCK_CRYSTAL_FREQ: 327 /* return clock value in KHz */ 328 if (rdev->asic->get_xclk) 329 *value = radeon_get_xclk(rdev) * 10; 330 else 331 *value = rdev->clock.spll.reference_freq * 10; 332 break; 333 case RADEON_INFO_NUM_BACKENDS: 334 if (rdev->family >= CHIP_BONAIRE) 335 *value = rdev->config.cik.max_backends_per_se * 336 rdev->config.cik.max_shader_engines; 337 else if (rdev->family >= CHIP_TAHITI) 338 *value = rdev->config.si.max_backends_per_se * 339 rdev->config.si.max_shader_engines; 340 else if (rdev->family >= CHIP_CAYMAN) 341 *value = rdev->config.cayman.max_backends_per_se * 342 rdev->config.cayman.max_shader_engines; 343 else if (rdev->family >= CHIP_CEDAR) 344 *value = rdev->config.evergreen.max_backends; 345 else if (rdev->family >= CHIP_RV770) 346 *value = rdev->config.rv770.max_backends; 347 else if (rdev->family >= CHIP_R600) 348 *value = rdev->config.r600.max_backends; 349 else { 350 return -EINVAL; 351 } 352 break; 353 case RADEON_INFO_NUM_TILE_PIPES: 354 if (rdev->family >= CHIP_BONAIRE) 355 *value = rdev->config.cik.max_tile_pipes; 356 else if (rdev->family >= CHIP_TAHITI) 357 *value = rdev->config.si.max_tile_pipes; 358 else if (rdev->family >= CHIP_CAYMAN) 359 *value = rdev->config.cayman.max_tile_pipes; 360 else if (rdev->family >= CHIP_CEDAR) 361 *value = rdev->config.evergreen.max_tile_pipes; 362 else if (rdev->family >= CHIP_RV770) 363 *value = rdev->config.rv770.max_tile_pipes; 364 else if (rdev->family >= CHIP_R600) 365 *value = rdev->config.r600.max_tile_pipes; 366 else { 367 return -EINVAL; 368 } 369 break; 370 case RADEON_INFO_FUSION_GART_WORKING: 371 *value = 1; 372 break; 373 
case RADEON_INFO_BACKEND_MAP: 374 if (rdev->family >= CHIP_BONAIRE) 375 *value = rdev->config.cik.backend_map; 376 else if (rdev->family >= CHIP_TAHITI) 377 *value = rdev->config.si.backend_map; 378 else if (rdev->family >= CHIP_CAYMAN) 379 *value = rdev->config.cayman.backend_map; 380 else if (rdev->family >= CHIP_CEDAR) 381 *value = rdev->config.evergreen.backend_map; 382 else if (rdev->family >= CHIP_RV770) 383 *value = rdev->config.rv770.backend_map; 384 else if (rdev->family >= CHIP_R600) 385 *value = rdev->config.r600.backend_map; 386 else { 387 return -EINVAL; 388 } 389 break; 390 case RADEON_INFO_VA_START: 391 /* this is where we report if vm is supported or not */ 392 if (rdev->family < CHIP_CAYMAN) 393 return -EINVAL; 394 *value = RADEON_VA_RESERVED_SIZE; 395 break; 396 case RADEON_INFO_IB_VM_MAX_SIZE: 397 /* this is where we report if vm is supported or not */ 398 if (rdev->family < CHIP_CAYMAN) 399 return -EINVAL; 400 *value = RADEON_IB_VM_MAX_SIZE; 401 break; 402 case RADEON_INFO_MAX_PIPES: 403 if (rdev->family >= CHIP_BONAIRE) 404 *value = rdev->config.cik.max_cu_per_sh; 405 else if (rdev->family >= CHIP_TAHITI) 406 *value = rdev->config.si.max_cu_per_sh; 407 else if (rdev->family >= CHIP_CAYMAN) 408 *value = rdev->config.cayman.max_pipes_per_simd; 409 else if (rdev->family >= CHIP_CEDAR) 410 *value = rdev->config.evergreen.max_pipes; 411 else if (rdev->family >= CHIP_RV770) 412 *value = rdev->config.rv770.max_pipes; 413 else if (rdev->family >= CHIP_R600) 414 *value = rdev->config.r600.max_pipes; 415 else { 416 return -EINVAL; 417 } 418 break; 419 case RADEON_INFO_TIMESTAMP: 420 if (rdev->family < CHIP_R600) { 421 DRM_DEBUG_KMS("timestamp is r6xx+ only!\n"); 422 return -EINVAL; 423 } 424 value = (uint32_t*)&value64; 425 value_size = sizeof(uint64_t); 426 value64 = radeon_get_gpu_clock_counter(rdev); 427 break; 428 case RADEON_INFO_MAX_SE: 429 if (rdev->family >= CHIP_BONAIRE) 430 *value = rdev->config.cik.max_shader_engines; 431 else if (rdev->family 
>= CHIP_TAHITI) 432 *value = rdev->config.si.max_shader_engines; 433 else if (rdev->family >= CHIP_CAYMAN) 434 *value = rdev->config.cayman.max_shader_engines; 435 else if (rdev->family >= CHIP_CEDAR) 436 *value = rdev->config.evergreen.num_ses; 437 else 438 *value = 1; 439 break; 440 case RADEON_INFO_MAX_SH_PER_SE: 441 if (rdev->family >= CHIP_BONAIRE) 442 *value = rdev->config.cik.max_sh_per_se; 443 else if (rdev->family >= CHIP_TAHITI) 444 *value = rdev->config.si.max_sh_per_se; 445 else 446 return -EINVAL; 447 break; 448 case RADEON_INFO_FASTFB_WORKING: 449 *value = rdev->fastfb_working; 450 break; 451 case RADEON_INFO_RING_WORKING: 452 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 453 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 454 return -EFAULT; 455 } 456 switch (*value) { 457 case RADEON_CS_RING_GFX: 458 case RADEON_CS_RING_COMPUTE: 459 *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready; 460 break; 461 case RADEON_CS_RING_DMA: 462 *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready; 463 *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready; 464 break; 465 case RADEON_CS_RING_UVD: 466 *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready; 467 break; 468 case RADEON_CS_RING_VCE: 469 *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready; 470 break; 471 default: 472 return -EINVAL; 473 } 474 break; 475 case RADEON_INFO_SI_TILE_MODE_ARRAY: 476 if (rdev->family >= CHIP_BONAIRE) { 477 value = rdev->config.cik.tile_mode_array; 478 value_size = sizeof(uint32_t)*32; 479 } else if (rdev->family >= CHIP_TAHITI) { 480 value = rdev->config.si.tile_mode_array; 481 value_size = sizeof(uint32_t)*32; 482 } else { 483 DRM_DEBUG_KMS("tile mode array is si+ only!\n"); 484 return -EINVAL; 485 } 486 break; 487 case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY: 488 if (rdev->family >= CHIP_BONAIRE) { 489 value = rdev->config.cik.macrotile_mode_array; 490 value_size = sizeof(uint32_t)*16; 491 } else { 492 DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n"); 493 
return -EINVAL; 494 } 495 break; 496 case RADEON_INFO_SI_CP_DMA_COMPUTE: 497 *value = 1; 498 break; 499 case RADEON_INFO_SI_BACKEND_ENABLED_MASK: 500 if (rdev->family >= CHIP_BONAIRE) { 501 *value = rdev->config.cik.backend_enable_mask; 502 } else if (rdev->family >= CHIP_TAHITI) { 503 *value = rdev->config.si.backend_enable_mask; 504 } else { 505 DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n"); 506 } 507 break; 508 case RADEON_INFO_MAX_SCLK: 509 if ((rdev->pm.pm_method == PM_METHOD_DPM) && 510 rdev->pm.dpm_enabled) 511 *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; 512 else 513 *value = rdev->pm.default_sclk * 10; 514 break; 515 case RADEON_INFO_VCE_FW_VERSION: 516 *value = rdev->vce.fw_version; 517 break; 518 case RADEON_INFO_VCE_FB_VERSION: 519 *value = rdev->vce.fb_version; 520 break; 521 case RADEON_INFO_NUM_BYTES_MOVED: 522 value = (uint32_t*)&value64; 523 value_size = sizeof(uint64_t); 524 value64 = atomic64_read(&rdev->num_bytes_moved); 525 break; 526 case RADEON_INFO_VRAM_USAGE: 527 value = (uint32_t*)&value64; 528 value_size = sizeof(uint64_t); 529 value64 = atomic64_read(&rdev->vram_usage); 530 break; 531 case RADEON_INFO_GTT_USAGE: 532 value = (uint32_t*)&value64; 533 value_size = sizeof(uint64_t); 534 value64 = atomic64_read(&rdev->gtt_usage); 535 break; 536 case RADEON_INFO_ACTIVE_CU_COUNT: 537 if (rdev->family >= CHIP_BONAIRE) 538 *value = rdev->config.cik.active_cus; 539 else if (rdev->family >= CHIP_TAHITI) 540 *value = rdev->config.si.active_cus; 541 else if (rdev->family >= CHIP_CAYMAN) 542 *value = rdev->config.cayman.active_simds; 543 else if (rdev->family >= CHIP_CEDAR) 544 *value = rdev->config.evergreen.active_simds; 545 else if (rdev->family >= CHIP_RV770) 546 *value = rdev->config.rv770.active_simds; 547 else if (rdev->family >= CHIP_R600) 548 *value = rdev->config.r600.active_simds; 549 else 550 *value = 1; 551 break; 552 case RADEON_INFO_CURRENT_GPU_TEMP: 553 /* get temperature in millidegrees C */ 554 if 
(rdev->asic->pm.get_temperature) 555 *value = radeon_get_temperature(rdev); 556 else 557 *value = 0; 558 break; 559 case RADEON_INFO_CURRENT_GPU_SCLK: 560 /* get sclk in Mhz */ 561 if (rdev->pm.dpm_enabled) 562 *value = radeon_dpm_get_current_sclk(rdev) / 100; 563 else 564 *value = rdev->pm.current_sclk / 100; 565 break; 566 case RADEON_INFO_CURRENT_GPU_MCLK: 567 /* get mclk in Mhz */ 568 if (rdev->pm.dpm_enabled) 569 *value = radeon_dpm_get_current_mclk(rdev) / 100; 570 else 571 *value = rdev->pm.current_mclk / 100; 572 break; 573 case RADEON_INFO_READ_REG: 574 if (copy_from_user(value, value_ptr, sizeof(uint32_t))) { 575 DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); 576 return -EFAULT; 577 } 578 if (radeon_get_allowed_info_register(rdev, *value, value)) 579 return -EINVAL; 580 break; 581 case RADEON_INFO_VA_UNMAP_WORKING: 582 *value = true; 583 break; 584 case RADEON_INFO_GPU_RESET_COUNTER: 585 *value = atomic_read(&rdev->gpu_reset_counter); 586 break; 587 default: 588 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 589 return -EINVAL; 590 } 591 if (copy_to_user(value_ptr, (char*)value, value_size)) { 592 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); 593 return -EFAULT; 594 } 595 return 0; 596} 597 598 599/* 600 * Outdated mess for old drm with Xorg being in charge (void function now). 601 */ 602/** 603 * radeon_driver_lastclose_kms - drm callback for last close 604 * 605 * @dev: drm dev pointer 606 * 607 * Switch vga_switcheroo state after last close (all asics). 608 */ 609void radeon_driver_lastclose_kms(struct drm_device *dev) 610{ 611 struct radeon_device *rdev = dev->dev_private; 612 613 radeon_fbdev_restore_mode(rdev); 614 vga_switcheroo_process_delayed_switch(); 615} 616 617/** 618 * radeon_driver_open_kms - drm callback for open 619 * 620 * @dev: drm dev pointer 621 * @file_priv: drm file 622 * 623 * On device open, init vm on cayman+ (all asics). 624 * Returns 0 on success, error on failure. 
625 */ 626int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) 627{ 628 struct radeon_device *rdev = dev->dev_private; 629 int r; 630 631 file_priv->driver_priv = NULL; 632 633 r = pm_runtime_get_sync(dev->dev); 634 if (r < 0) 635 return r; 636 637 /* new gpu have virtual address space support */ 638 if (rdev->family >= CHIP_CAYMAN) { 639 struct radeon_fpriv *fpriv; 640 struct radeon_vm *vm; 641 int r; 642 643 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); 644 if (unlikely(!fpriv)) { 645 return -ENOMEM; 646 } 647 648 if (rdev->accel_working) { 649 vm = &fpriv->vm; 650 r = radeon_vm_init(rdev, vm); 651 if (r) { 652 kfree(fpriv); 653 return r; 654 } 655 656 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 657 if (r) { 658 radeon_vm_fini(rdev, vm); 659 kfree(fpriv); 660 return r; 661 } 662 663 /* map the ib pool buffer read only into 664 * virtual address space */ 665 vm->ib_bo_va = radeon_vm_bo_add(rdev, vm, 666 rdev->ring_tmp_bo.bo); 667 r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, 668 RADEON_VA_IB_OFFSET, 669 RADEON_VM_PAGE_READABLE | 670 RADEON_VM_PAGE_SNOOPED); 671 if (r) { 672 radeon_vm_fini(rdev, vm); 673 kfree(fpriv); 674 return r; 675 } 676 } 677 file_priv->driver_priv = fpriv; 678 } 679 680 pm_runtime_mark_last_busy(dev->dev); 681 pm_runtime_put_autosuspend(dev->dev); 682 return 0; 683} 684 685/** 686 * radeon_driver_postclose_kms - drm callback for post close 687 * 688 * @dev: drm dev pointer 689 * @file_priv: drm file 690 * 691 * On device post close, tear down vm on cayman+ (all asics). 
692 */ 693void radeon_driver_postclose_kms(struct drm_device *dev, 694 struct drm_file *file_priv) 695{ 696 struct radeon_device *rdev = dev->dev_private; 697 698 /* new gpu have virtual address space support */ 699 if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) { 700 struct radeon_fpriv *fpriv = file_priv->driver_priv; 701 struct radeon_vm *vm = &fpriv->vm; 702 int r; 703 704 if (rdev->accel_working) { 705 r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); 706 if (!r) { 707 if (vm->ib_bo_va) 708 radeon_vm_bo_rmv(rdev, vm->ib_bo_va); 709 radeon_bo_unreserve(rdev->ring_tmp_bo.bo); 710 } 711 radeon_vm_fini(rdev, vm); 712 } 713 714 kfree(fpriv); 715 file_priv->driver_priv = NULL; 716 } 717} 718 719/** 720 * radeon_driver_preclose_kms - drm callback for pre close 721 * 722 * @dev: drm dev pointer 723 * @file_priv: drm file 724 * 725 * On device pre close, tear down hyperz and cmask filps on r1xx-r5xx 726 * (all asics). 727 */ 728void radeon_driver_preclose_kms(struct drm_device *dev, 729 struct drm_file *file_priv) 730{ 731 struct radeon_device *rdev = dev->dev_private; 732 733 mutex_lock(&rdev->gem.mutex); 734 if (rdev->hyperz_filp == file_priv) 735 rdev->hyperz_filp = NULL; 736 if (rdev->cmask_filp == file_priv) 737 rdev->cmask_filp = NULL; 738 mutex_unlock(&rdev->gem.mutex); 739 740 radeon_uvd_free_handles(rdev, file_priv); 741 radeon_vce_free_handles(rdev, file_priv); 742} 743 744/* 745 * VBlank related functions. 746 */ 747/** 748 * radeon_get_vblank_counter_kms - get frame count 749 * 750 * @dev: drm dev pointer 751 * @crtc: crtc to get the frame count from 752 * 753 * Gets the frame count on the requested crtc (all asics). 754 * Returns frame count on success, -EINVAL on failure. 
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (rdev->mode_info.crtcs[crtc]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = radeon_get_vblank_counter(rdev, crtc);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, crtc, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[crtc]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, crtc));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		}
		else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      crtc, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	}
	else {
		/* Fallback to use value as is. */
		count = radeon_get_vblank_counter(rdev, crtc);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}

/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Flag the crtc and reprogram the irq registers under the lock. */
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}

/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	/* Clear the flag and reprogram the irq registers under the lock. */
	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[crtc] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}

/**
 * radeon_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: max error
 * @vblank_time: time value
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position. (all asics).
878 * Returns postive status flags on success, negative error on failure. 879 */ 880int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, 881 int *max_error, 882 struct timeval *vblank_time, 883 unsigned flags) 884{ 885 struct drm_crtc *drmcrtc; 886 struct radeon_device *rdev = dev->dev_private; 887 888 if (crtc < 0 || crtc >= dev->num_crtcs) { 889 DRM_ERROR("Invalid crtc %d\n", crtc); 890 return -EINVAL; 891 } 892 893 /* Get associated drm_crtc: */ 894 drmcrtc = &rdev->mode_info.crtcs[crtc]->base; 895 if (!drmcrtc) 896 return -EINVAL; 897 898 /* Helper routine in DRM core does all the work: */ 899 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, 900 vblank_time, flags, 901 &drmcrtc->hwmode); 902} 903 904const struct drm_ioctl_desc radeon_ioctls_kms[] = { 905 DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 906 DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 907 DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 908 DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 909 DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH), 910 DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH), 911 DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH), 912 DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH), 913 DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH), 914 DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH), 915 DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH), 916 DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH), 917 DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH), 918 DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH), 919 DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 920 DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH), 921 
DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH), 922 DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH), 923 DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH), 924 DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH), 925 DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH), 926 DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 927 DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH), 928 DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH), 929 DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH), 930 DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH), 931 DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH), 932 /* KMS */ 933 DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 934 DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 935 DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 936 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 937 DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), 938 DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), 939 DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 940 DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 941 DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 942 DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 943 DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 944 DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 945 DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 946 DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, 
DRM_AUTH|DRM_RENDER_ALLOW), 947 DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 948}; 949int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); 950