1/* 2 * Copyright 2008-2009 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the "Software"), 7 * to deal in the Software without restriction, including without limitation 8 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * and/or sell copies of the Software, and to permit persons to whom the 10 * Software is furnished to do so, subject to the following conditions: 11 * 12 * The above copyright notice and this permission notice (including the next 13 * paragraph) shall be included in all copies or substantial portions of the 14 * Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 22 * DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: 25 * Dave Airlie <airlied@redhat.com> 26 * Alex Deucher <alexander.deucher@amd.com> 27 * 28 * ------------------------ This file is DEPRECATED! ------------------------- 29 */ 30 31#include <linux/module.h> 32 33#include <drm/drmP.h> 34#include <drm/radeon_drm.h> 35#include "radeon_drv.h" 36 37#define PFP_UCODE_SIZE 576 38#define PM4_UCODE_SIZE 1792 39#define R700_PFP_UCODE_SIZE 848 40#define R700_PM4_UCODE_SIZE 1360 41 42/* Firmware Names */ 43MODULE_FIRMWARE("radeon/R600_pfp.bin"); 44MODULE_FIRMWARE("radeon/R600_me.bin"); 45MODULE_FIRMWARE("radeon/RV610_pfp.bin"); 46MODULE_FIRMWARE("radeon/RV610_me.bin"); 47MODULE_FIRMWARE("radeon/RV630_pfp.bin"); 48MODULE_FIRMWARE("radeon/RV630_me.bin"); 49MODULE_FIRMWARE("radeon/RV620_pfp.bin"); 50MODULE_FIRMWARE("radeon/RV620_me.bin"); 51MODULE_FIRMWARE("radeon/RV635_pfp.bin"); 52MODULE_FIRMWARE("radeon/RV635_me.bin"); 53MODULE_FIRMWARE("radeon/RV670_pfp.bin"); 54MODULE_FIRMWARE("radeon/RV670_me.bin"); 55MODULE_FIRMWARE("radeon/RS780_pfp.bin"); 56MODULE_FIRMWARE("radeon/RS780_me.bin"); 57MODULE_FIRMWARE("radeon/RV770_pfp.bin"); 58MODULE_FIRMWARE("radeon/RV770_me.bin"); 59MODULE_FIRMWARE("radeon/RV730_pfp.bin"); 60MODULE_FIRMWARE("radeon/RV730_me.bin"); 61MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 62MODULE_FIRMWARE("radeon/RV710_me.bin"); 63 64 65int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, 66 unsigned family, u32 *ib, int *l); 67void r600_cs_legacy_init(void); 68 69 70# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */ 71# define ATI_PCIGART_PAGE_MASK (~(ATI_PCIGART_PAGE_SIZE-1)) 72 73#define R600_PTE_VALID (1 << 0) 74#define R600_PTE_SYSTEM (1 << 1) 75#define R600_PTE_SNOOPED (1 << 2) 76#define R600_PTE_READABLE (1 << 5) 77#define R600_PTE_WRITEABLE (1 << 6) 78 79/* MAX values used for gfx init */ 80#define R6XX_MAX_SH_GPRS 256 81#define R6XX_MAX_TEMP_GPRS 16 82#define R6XX_MAX_SH_THREADS 256 83#define R6XX_MAX_SH_STACK_ENTRIES 4096 84#define R6XX_MAX_BACKENDS 8 85#define R6XX_MAX_BACKENDS_MASK 0xff 86#define R6XX_MAX_SIMDS 8 87#define R6XX_MAX_SIMDS_MASK 0xff 88#define R6XX_MAX_PIPES 8 89#define R6XX_MAX_PIPES_MASK 0xff 90 91#define R7XX_MAX_SH_GPRS 256 92#define R7XX_MAX_TEMP_GPRS 16 93#define R7XX_MAX_SH_THREADS 256 94#define R7XX_MAX_SH_STACK_ENTRIES 4096 95#define R7XX_MAX_BACKENDS 8 96#define R7XX_MAX_BACKENDS_MASK 0xff 97#define R7XX_MAX_SIMDS 16 98#define R7XX_MAX_SIMDS_MASK 0xffff 99#define R7XX_MAX_PIPES 8 100#define R7XX_MAX_PIPES_MASK 0xff 101 102static int r600_do_wait_for_fifo(drm_radeon_private_t *dev_priv, int entries) 103{ 104 int i; 105 106 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; 107 108 for (i = 0; i < dev_priv->usec_timeout; i++) { 109 int slots; 110 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) 111 slots = (RADEON_READ(R600_GRBM_STATUS) 112 & R700_CMDFIFO_AVAIL_MASK); 113 else 114 slots = (RADEON_READ(R600_GRBM_STATUS) 115 & R600_CMDFIFO_AVAIL_MASK); 116 if (slots >= entries) 117 return 0; 118 DRM_UDELAY(1); 119 } 120 DRM_INFO("wait for fifo failed status : 0x%08X 0x%08X\n", 121 RADEON_READ(R600_GRBM_STATUS), 122 RADEON_READ(R600_GRBM_STATUS2)); 123 124 return -EBUSY; 125} 126 127static int r600_do_wait_for_idle(drm_radeon_private_t *dev_priv) 128{ 129 int i, ret; 130 131 dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; 132 133 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) 134 ret = r600_do_wait_for_fifo(dev_priv, 8); 135 else 136 ret = r600_do_wait_for_fifo(dev_priv, 16); 137 if (ret) 138 return ret; 139 for (i = 0; i < dev_priv->usec_timeout; i++) { 140 if (!(RADEON_READ(R600_GRBM_STATUS) & R600_GUI_ACTIVE)) 141 return 0; 142 DRM_UDELAY(1); 143 } 144 DRM_INFO("wait idle failed status : 0x%08X 0x%08X\n", 145 RADEON_READ(R600_GRBM_STATUS), 146 RADEON_READ(R600_GRBM_STATUS2)); 147 148 return -EBUSY; 149} 150 151void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) 152{ 153 struct drm_sg_mem *entry = dev->sg; 154 int max_pages; 155 int pages; 156 int i; 157 158 if (!entry) 159 return; 160 161 if (gart_info->bus_addr) { 162 max_pages = (gart_info->table_size / sizeof(u64)); 163 pages = (entry->pages <= max_pages) 164 ? entry->pages : max_pages; 165 166 for (i = 0; i < pages; i++) { 167 if (!entry->busaddr[i]) 168 break; 169 pci_unmap_page(dev->pdev, entry->busaddr[i], 170 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 171 } 172 if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) 173 gart_info->bus_addr = 0; 174 } 175} 176 177/* R600 has page table setup */ 178int r600_page_table_init(struct drm_device *dev) 179{ 180 drm_radeon_private_t *dev_priv = dev->dev_private; 181 struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info; 182 struct drm_local_map *map = &gart_info->mapping; 183 struct drm_sg_mem *entry = dev->sg; 184 int ret = 0; 185 int i, j; 186 int pages; 187 u64 page_base; 188 dma_addr_t entry_addr; 189 int max_ati_pages, max_real_pages, gart_idx; 190 191 /* okay page table is available - lets rock */ 192 max_ati_pages = (gart_info->table_size / sizeof(u64)); 193 max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); 194 195 pages = (entry->pages <= max_real_pages) ? 196 entry->pages : max_real_pages; 197 198 memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u64)); 199 200 gart_idx = 0; 201 for (i = 0; i < pages; i++) { 202 entry->busaddr[i] = pci_map_page(dev->pdev, 203 entry->pagelist[i], 0, 204 PAGE_SIZE, 205 PCI_DMA_BIDIRECTIONAL); 206 if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) { 207 DRM_ERROR("unable to map PCIGART pages!\n"); 208 r600_page_table_cleanup(dev, gart_info); 209 goto done; 210 } 211 entry_addr = entry->busaddr[i]; 212 for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) { 213 page_base = (u64) entry_addr & ATI_PCIGART_PAGE_MASK; 214 page_base |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED; 215 page_base |= R600_PTE_READABLE | R600_PTE_WRITEABLE; 216 217 DRM_WRITE64(map, gart_idx * sizeof(u64), page_base); 218 219 gart_idx++; 220 221 if ((i % 128) == 0) 222 DRM_DEBUG("page entry %d: 0x%016llx\n", 223 i, (unsigned long long)page_base); 224 entry_addr += ATI_PCIGART_PAGE_SIZE; 225 } 226 } 227 ret = 1; 228done: 229 return ret; 230} 231 232static void r600_vm_flush_gart_range(struct drm_device *dev) 233{ 234 drm_radeon_private_t *dev_priv = dev->dev_private; 235 u32 resp, countdown = 1000; 236 RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_LOW_ADDR, dev_priv->gart_vm_start >> 12); 237 RADEON_WRITE(R600_VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12); 238 RADEON_WRITE(R600_VM_CONTEXT0_REQUEST_RESPONSE, 2); 239 240 do { 241 resp = RADEON_READ(R600_VM_CONTEXT0_REQUEST_RESPONSE); 242 countdown--; 243 DRM_UDELAY(1); 244 } while (((resp & 0xf0) == 0) && countdown); 245} 246 247static void r600_vm_init(struct drm_device *dev) 248{ 249 drm_radeon_private_t *dev_priv = dev->dev_private; 250 /* initialise the VM to use the page table we constructed up there */ 251 u32 vm_c0, i; 252 u32 mc_rd_a; 253 u32 vm_l2_cntl, vm_l2_cntl3; 254 /* okay set up the PCIE aperture type thingo */ 255 RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12); 256 RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12); 257 RADEON_WRITE(R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 258 259 /* setup MC RD a */ 260 mc_rd_a = R600_MCD_L1_TLB | R600_MCD_L1_FRAG_PROC | R600_MCD_SYSTEM_ACCESS_MODE_IN_SYS | 261 R600_MCD_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | R600_MCD_EFFECTIVE_L1_TLB_SIZE(5) | 262 R600_MCD_EFFECTIVE_L1_QUEUE_SIZE(5) | R600_MCD_WAIT_L2_QUERY; 263 264 RADEON_WRITE(R600_MCD_RD_A_CNTL, mc_rd_a); 265 RADEON_WRITE(R600_MCD_RD_B_CNTL, mc_rd_a); 266 267 RADEON_WRITE(R600_MCD_WR_A_CNTL, mc_rd_a); 268 RADEON_WRITE(R600_MCD_WR_B_CNTL, mc_rd_a); 269 270 RADEON_WRITE(R600_MCD_RD_GFX_CNTL, mc_rd_a); 271 RADEON_WRITE(R600_MCD_WR_GFX_CNTL, mc_rd_a); 272 273 RADEON_WRITE(R600_MCD_RD_SYS_CNTL, mc_rd_a); 274 RADEON_WRITE(R600_MCD_WR_SYS_CNTL, mc_rd_a); 275 276 RADEON_WRITE(R600_MCD_RD_HDP_CNTL, mc_rd_a | R600_MCD_L1_STRICT_ORDERING); 277 RADEON_WRITE(R600_MCD_WR_HDP_CNTL, mc_rd_a /*| R600_MCD_L1_STRICT_ORDERING*/); 278 279 RADEON_WRITE(R600_MCD_RD_PDMA_CNTL, mc_rd_a); 280 RADEON_WRITE(R600_MCD_WR_PDMA_CNTL, mc_rd_a); 281 282 RADEON_WRITE(R600_MCD_RD_SEM_CNTL, mc_rd_a | R600_MCD_SEMAPHORE_MODE); 283 RADEON_WRITE(R600_MCD_WR_SEM_CNTL, mc_rd_a); 284 285 vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W; 286 vm_l2_cntl |= R600_VM_L2_CNTL_QUEUE_SIZE(7); 287 RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl); 288 289 RADEON_WRITE(R600_VM_L2_CNTL2, 0); 290 vm_l2_cntl3 = (R600_VM_L2_CNTL3_BANK_SELECT_0(0) | 291 R600_VM_L2_CNTL3_BANK_SELECT_1(1) | 292 R600_VM_L2_CNTL3_CACHE_UPDATE_MODE(2)); 293 RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3); 294 295 vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT; 296 297 RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0); 298 299 vm_c0 &= ~R600_VM_ENABLE_CONTEXT; 300 301 /* disable all other contexts */ 302 for (i = 1; i < 8; i++) 303 RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0); 304 305 RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12); 306 RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12); 307 RADEON_WRITE(R600_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12); 308 309 r600_vm_flush_gart_range(dev); 310} 311 312static int r600_cp_init_microcode(drm_radeon_private_t *dev_priv) 313{ 314 struct platform_device *pdev; 315 const char *chip_name; 316 size_t pfp_req_size, me_req_size; 317 char fw_name[30]; 318 int err; 319 320 pdev = platform_device_register_simple("r600_cp", 0, NULL, 0); 321 err = IS_ERR(pdev); 322 if (err) { 323 printk(KERN_ERR "r600_cp: Failed to register firmware\n"); 324 return -EINVAL; 325 } 326 327 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 328 case CHIP_R600: chip_name = "R600"; break; 329 case CHIP_RV610: chip_name = "RV610"; break; 330 case CHIP_RV630: chip_name = "RV630"; break; 331 case CHIP_RV620: chip_name = "RV620"; break; 332 case CHIP_RV635: chip_name = "RV635"; break; 333 case CHIP_RV670: chip_name = "RV670"; break; 334 case CHIP_RS780: 335 case CHIP_RS880: chip_name = "RS780"; break; 336 case CHIP_RV770: chip_name = "RV770"; break; 337 case CHIP_RV730: 338 case CHIP_RV740: chip_name = "RV730"; break; 339 case CHIP_RV710: chip_name = "RV710"; break; 340 default: BUG(); 341 } 342 343 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) { 344 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 345 me_req_size = R700_PM4_UCODE_SIZE * 4; 346 } else { 347 pfp_req_size = PFP_UCODE_SIZE * 4; 348 me_req_size = PM4_UCODE_SIZE * 12; 349 } 350 351 DRM_INFO("Loading %s CP Microcode\n", chip_name); 352 353 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 354 err = request_firmware(&dev_priv->pfp_fw, fw_name, &pdev->dev); 355 if (err) 356 goto out; 357 if (dev_priv->pfp_fw->size != pfp_req_size) { 358 printk(KERN_ERR 359 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 360 dev_priv->pfp_fw->size, fw_name); 361 err = -EINVAL; 362 goto out; 363 } 364 365 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); 366 err = request_firmware(&dev_priv->me_fw, fw_name, &pdev->dev); 367 if (err) 368 goto out; 369 if (dev_priv->me_fw->size != me_req_size) { 370 printk(KERN_ERR 371 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 372 dev_priv->me_fw->size, fw_name); 373 err = -EINVAL; 374 } 375out: 376 platform_device_unregister(pdev); 377 378 if (err) { 379 if (err != -EINVAL) 380 printk(KERN_ERR 381 "r600_cp: Failed to load firmware \"%s\"\n", 382 fw_name); 383 release_firmware(dev_priv->pfp_fw); 384 dev_priv->pfp_fw = NULL; 385 release_firmware(dev_priv->me_fw); 386 dev_priv->me_fw = NULL; 387 } 388 return err; 389} 390 391static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv) 392{ 393 const __be32 *fw_data; 394 int i; 395 396 if (!dev_priv->me_fw || !dev_priv->pfp_fw) 397 return; 398 399 r600_do_cp_stop(dev_priv); 400 401 RADEON_WRITE(R600_CP_RB_CNTL, 402#ifdef __BIG_ENDIAN 403 R600_BUF_SWAP_32BIT | 404#endif 405 R600_RB_NO_UPDATE | 406 R600_RB_BLKSZ(15) | 407 R600_RB_BUFSZ(3)); 408 409 RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); 410 RADEON_READ(R600_GRBM_SOFT_RESET); 411 mdelay(15); 412 RADEON_WRITE(R600_GRBM_SOFT_RESET, 0); 413 414 fw_data = (const __be32 *)dev_priv->me_fw->data; 415 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 416 for (i = 0; i < PM4_UCODE_SIZE * 3; i++) 417 RADEON_WRITE(R600_CP_ME_RAM_DATA, 418 be32_to_cpup(fw_data++)); 419 420 fw_data = (const __be32 *)dev_priv->pfp_fw->data; 421 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 422 for (i = 0; i < PFP_UCODE_SIZE; i++) 423 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, 424 be32_to_cpup(fw_data++)); 425 426 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 427 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 428 RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0); 429 430} 431 432static void r700_vm_init(struct drm_device *dev) 433{ 434 drm_radeon_private_t *dev_priv = dev->dev_private; 435 /* initialise the VM to use the page table we constructed up there */ 436 u32 vm_c0, i; 437 u32 mc_vm_md_l1; 438 u32 vm_l2_cntl, vm_l2_cntl3; 439 /* okay set up the PCIE aperture type thingo */ 440 RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR, dev_priv->gart_vm_start >> 12); 441 RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12); 442 RADEON_WRITE(R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 443 444 mc_vm_md_l1 = R700_ENABLE_L1_TLB | 445 R700_ENABLE_L1_FRAGMENT_PROCESSING | 446 R700_SYSTEM_ACCESS_MODE_IN_SYS | 447 R700_SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | 448 R700_EFFECTIVE_L1_TLB_SIZE(5) | 449 R700_EFFECTIVE_L1_QUEUE_SIZE(5); 450 451 RADEON_WRITE(R700_MC_VM_MD_L1_TLB0_CNTL, mc_vm_md_l1); 452 RADEON_WRITE(R700_MC_VM_MD_L1_TLB1_CNTL, mc_vm_md_l1); 453 RADEON_WRITE(R700_MC_VM_MD_L1_TLB2_CNTL, mc_vm_md_l1); 454 RADEON_WRITE(R700_MC_VM_MB_L1_TLB0_CNTL, mc_vm_md_l1); 455 RADEON_WRITE(R700_MC_VM_MB_L1_TLB1_CNTL, mc_vm_md_l1); 456 RADEON_WRITE(R700_MC_VM_MB_L1_TLB2_CNTL, mc_vm_md_l1); 457 RADEON_WRITE(R700_MC_VM_MB_L1_TLB3_CNTL, mc_vm_md_l1); 458 459 vm_l2_cntl = R600_VM_L2_CACHE_EN | R600_VM_L2_FRAG_PROC | R600_VM_ENABLE_PTE_CACHE_LRU_W; 460 vm_l2_cntl |= R700_VM_L2_CNTL_QUEUE_SIZE(7); 461 RADEON_WRITE(R600_VM_L2_CNTL, vm_l2_cntl); 462 463 RADEON_WRITE(R600_VM_L2_CNTL2, 0); 464 vm_l2_cntl3 = R700_VM_L2_CNTL3_BANK_SELECT(0) | R700_VM_L2_CNTL3_CACHE_UPDATE_MODE(2); 465 RADEON_WRITE(R600_VM_L2_CNTL3, vm_l2_cntl3); 466 467 vm_c0 = R600_VM_ENABLE_CONTEXT | R600_VM_PAGE_TABLE_DEPTH_FLAT; 468 469 RADEON_WRITE(R600_VM_CONTEXT0_CNTL, vm_c0); 470 471 vm_c0 &= ~R600_VM_ENABLE_CONTEXT; 472 473 /* disable all other contexts */ 474 for (i = 1; i < 8; i++) 475 RADEON_WRITE(R600_VM_CONTEXT0_CNTL + (i * 4), vm_c0); 476 477 RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, dev_priv->gart_info.bus_addr >> 12); 478 RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_START_ADDR, dev_priv->gart_vm_start >> 12); 479 RADEON_WRITE(R700_VM_CONTEXT0_PAGE_TABLE_END_ADDR, (dev_priv->gart_vm_start + dev_priv->gart_size - 1) >> 12); 480 481 r600_vm_flush_gart_range(dev); 482} 483 484static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv) 485{ 486 const __be32 *fw_data; 487 int i; 488 489 if (!dev_priv->me_fw || !dev_priv->pfp_fw) 490 return; 491 492 r600_do_cp_stop(dev_priv); 493 494 RADEON_WRITE(R600_CP_RB_CNTL, 495#ifdef __BIG_ENDIAN 496 R600_BUF_SWAP_32BIT | 497#endif 498 R600_RB_NO_UPDATE | 499 R600_RB_BLKSZ(15) | 500 R600_RB_BUFSZ(3)); 501 502 RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); 503 RADEON_READ(R600_GRBM_SOFT_RESET); 504 mdelay(15); 505 RADEON_WRITE(R600_GRBM_SOFT_RESET, 0); 506 507 fw_data = (const __be32 *)dev_priv->pfp_fw->data; 508 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 509 for (i = 0; i < R700_PFP_UCODE_SIZE; i++) 510 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++)); 511 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 512 513 fw_data = (const __be32 *)dev_priv->me_fw->data; 514 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 515 for (i = 0; i < R700_PM4_UCODE_SIZE; i++) 516 RADEON_WRITE(R600_CP_ME_RAM_DATA, be32_to_cpup(fw_data++)); 517 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 518 519 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 520 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 521 RADEON_WRITE(R600_CP_ME_RAM_RADDR, 0); 522 523} 524 525static void r600_test_writeback(drm_radeon_private_t *dev_priv) 526{ 527 u32 tmp; 528 529 /* Start with assuming that writeback doesn't work */ 530 dev_priv->writeback_works = 0; 531 532 /* Writeback doesn't seem to work everywhere, test it here and possibly 533 * enable it if it appears to work 534 */ 535 radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0); 536 537 RADEON_WRITE(R600_SCRATCH_REG1, 0xdeadbeef); 538 539 for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) { 540 u32 val; 541 542 val = radeon_read_ring_rptr(dev_priv, R600_SCRATCHOFF(1)); 543 if (val == 0xdeadbeef) 544 break; 545 DRM_UDELAY(1); 546 } 547 548 if (tmp < dev_priv->usec_timeout) { 549 dev_priv->writeback_works = 1; 550 DRM_INFO("writeback test succeeded in %d usecs\n", tmp); 551 } else { 552 dev_priv->writeback_works = 0; 553 DRM_INFO("writeback test failed\n"); 554 } 555 if (radeon_no_wb == 1) { 556 dev_priv->writeback_works = 0; 557 DRM_INFO("writeback forced off\n"); 558 } 559 560 if (!dev_priv->writeback_works) { 561 /* Disable writeback to avoid unnecessary bus master transfer */ 562 RADEON_WRITE(R600_CP_RB_CNTL, 563#ifdef __BIG_ENDIAN 564 R600_BUF_SWAP_32BIT | 565#endif 566 RADEON_READ(R600_CP_RB_CNTL) | 567 R600_RB_NO_UPDATE); 568 RADEON_WRITE(R600_SCRATCH_UMSK, 0); 569 } 570} 571 572int r600_do_engine_reset(struct drm_device *dev) 573{ 574 drm_radeon_private_t *dev_priv = dev->dev_private; 575 u32 cp_ptr, cp_me_cntl, cp_rb_cntl; 576 577 DRM_INFO("Resetting GPU\n"); 578 579 cp_ptr = RADEON_READ(R600_CP_RB_WPTR); 580 cp_me_cntl = RADEON_READ(R600_CP_ME_CNTL); 581 RADEON_WRITE(R600_CP_ME_CNTL, R600_CP_ME_HALT); 582 583 RADEON_WRITE(R600_GRBM_SOFT_RESET, 0x7fff); 584 RADEON_READ(R600_GRBM_SOFT_RESET); 585 DRM_UDELAY(50); 586 RADEON_WRITE(R600_GRBM_SOFT_RESET, 0); 587 RADEON_READ(R600_GRBM_SOFT_RESET); 588 589 RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); 590 cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); 591 RADEON_WRITE(R600_CP_RB_CNTL, 592#ifdef __BIG_ENDIAN 593 R600_BUF_SWAP_32BIT | 594#endif 595 R600_RB_RPTR_WR_ENA); 596 597 RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); 598 RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); 599 RADEON_WRITE(R600_CP_RB_CNTL, cp_rb_cntl); 600 RADEON_WRITE(R600_CP_ME_CNTL, cp_me_cntl); 601 602 /* Reset the CP ring */ 603 r600_do_cp_reset(dev_priv); 604 605 /* The CP is no longer running after an engine reset */ 606 dev_priv->cp_running = 0; 607 608 /* Reset any pending vertex, indirect buffers */ 609 radeon_freelist_reset(dev); 610 611 return 0; 612 613} 614 615static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 616 u32 num_backends, 617 u32 backend_disable_mask) 618{ 619 u32 backend_map = 0; 620 u32 enabled_backends_mask; 621 u32 enabled_backends_count; 622 u32 cur_pipe; 623 u32 swizzle_pipe[R6XX_MAX_PIPES]; 624 u32 cur_backend; 625 u32 i; 626 627 if (num_tile_pipes > R6XX_MAX_PIPES) 628 num_tile_pipes = R6XX_MAX_PIPES; 629 if (num_tile_pipes < 1) 630 num_tile_pipes = 1; 631 if (num_backends > R6XX_MAX_BACKENDS) 632 num_backends = R6XX_MAX_BACKENDS; 633 if (num_backends < 1) 634 num_backends = 1; 635 636 enabled_backends_mask = 0; 637 enabled_backends_count = 0; 638 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) { 639 if (((backend_disable_mask >> i) & 1) == 0) { 640 enabled_backends_mask |= (1 << i); 641 ++enabled_backends_count; 642 } 643 if (enabled_backends_count == num_backends) 644 break; 645 } 646 647 if (enabled_backends_count == 0) { 648 enabled_backends_mask = 1; 649 enabled_backends_count = 1; 650 } 651 652 if (enabled_backends_count != num_backends) 653 num_backends = enabled_backends_count; 654 655 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES); 656 switch (num_tile_pipes) { 657 case 1: 658 swizzle_pipe[0] = 0; 659 break; 660 case 2: 661 swizzle_pipe[0] = 0; 662 swizzle_pipe[1] = 1; 663 break; 664 case 3: 665 swizzle_pipe[0] = 0; 666 swizzle_pipe[1] = 1; 667 swizzle_pipe[2] = 2; 668 break; 669 case 4: 670 swizzle_pipe[0] = 0; 671 swizzle_pipe[1] = 1; 672 swizzle_pipe[2] = 2; 673 swizzle_pipe[3] = 3; 674 break; 675 case 5: 676 swizzle_pipe[0] = 0; 677 swizzle_pipe[1] = 1; 678 swizzle_pipe[2] = 2; 679 swizzle_pipe[3] = 3; 680 swizzle_pipe[4] = 4; 681 break; 682 case 6: 683 swizzle_pipe[0] = 0; 684 swizzle_pipe[1] = 2; 685 swizzle_pipe[2] = 4; 686 swizzle_pipe[3] = 5; 687 swizzle_pipe[4] = 1; 688 swizzle_pipe[5] = 3; 689 break; 690 case 7: 691 swizzle_pipe[0] = 0; 692 swizzle_pipe[1] = 2; 693 swizzle_pipe[2] = 4; 694 swizzle_pipe[3] = 6; 695 swizzle_pipe[4] = 1; 696 swizzle_pipe[5] = 3; 697 swizzle_pipe[6] = 5; 698 break; 699 case 8: 700 swizzle_pipe[0] = 0; 701 swizzle_pipe[1] = 2; 702 swizzle_pipe[2] = 4; 703 swizzle_pipe[3] = 6; 704 swizzle_pipe[4] = 1; 705 swizzle_pipe[5] = 3; 706 swizzle_pipe[6] = 5; 707 swizzle_pipe[7] = 7; 708 break; 709 } 710 711 cur_backend = 0; 712 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { 713 while (((1 << cur_backend) & enabled_backends_mask) == 0) 714 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; 715 716 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); 717 718 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; 719 } 720 721 return backend_map; 722} 723 724static int r600_count_pipe_bits(uint32_t val) 725{ 726 return hweight32(val); 727} 728 729static void r600_gfx_init(struct drm_device *dev, 730 drm_radeon_private_t *dev_priv) 731{ 732 int i, j, num_qd_pipes; 733 u32 sx_debug_1; 734 u32 tc_cntl; 735 u32 arb_pop; 736 u32 num_gs_verts_per_thread; 737 u32 vgt_gs_per_es; 738 u32 gs_prim_buffer_depth = 0; 739 u32 sq_ms_fifo_sizes; 740 u32 sq_config; 741 u32 sq_gpr_resource_mgmt_1 = 0; 742 u32 sq_gpr_resource_mgmt_2 = 0; 743 u32 sq_thread_resource_mgmt = 0; 744 u32 sq_stack_resource_mgmt_1 = 0; 745 u32 sq_stack_resource_mgmt_2 = 0; 746 u32 hdp_host_path_cntl; 747 u32 backend_map; 748 u32 gb_tiling_config = 0; 749 u32 cc_rb_backend_disable; 750 u32 cc_gc_shader_pipe_config; 751 u32 ramcfg; 752 753 /* setup chip specs */ 754 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 755 case CHIP_R600: 756 dev_priv->r600_max_pipes = 4; 757 dev_priv->r600_max_tile_pipes = 8; 758 dev_priv->r600_max_simds = 4; 759 dev_priv->r600_max_backends = 4; 760 dev_priv->r600_max_gprs = 256; 761 dev_priv->r600_max_threads = 192; 762 dev_priv->r600_max_stack_entries = 256; 763 dev_priv->r600_max_hw_contexts = 8; 764 dev_priv->r600_max_gs_threads = 16; 765 dev_priv->r600_sx_max_export_size = 128; 766 dev_priv->r600_sx_max_export_pos_size = 16; 767 dev_priv->r600_sx_max_export_smx_size = 128; 768 dev_priv->r600_sq_num_cf_insts = 2; 769 break; 770 case CHIP_RV630: 771 case CHIP_RV635: 772 dev_priv->r600_max_pipes = 2; 773 dev_priv->r600_max_tile_pipes = 2; 774 dev_priv->r600_max_simds = 3; 775 dev_priv->r600_max_backends = 1; 776 dev_priv->r600_max_gprs = 128; 777 dev_priv->r600_max_threads = 192; 778 dev_priv->r600_max_stack_entries = 128; 779 dev_priv->r600_max_hw_contexts = 8; 780 dev_priv->r600_max_gs_threads = 4; 781 dev_priv->r600_sx_max_export_size = 128; 782 dev_priv->r600_sx_max_export_pos_size = 16; 783 dev_priv->r600_sx_max_export_smx_size = 128; 784 dev_priv->r600_sq_num_cf_insts = 2; 785 break; 786 case CHIP_RV610: 787 case CHIP_RS780: 788 case CHIP_RS880: 789 case CHIP_RV620: 790 dev_priv->r600_max_pipes = 1; 791 dev_priv->r600_max_tile_pipes = 1; 792 dev_priv->r600_max_simds = 2; 793 dev_priv->r600_max_backends = 1; 794 dev_priv->r600_max_gprs = 128; 795 dev_priv->r600_max_threads = 192; 796 dev_priv->r600_max_stack_entries = 128; 797 dev_priv->r600_max_hw_contexts = 4; 798 dev_priv->r600_max_gs_threads = 4; 799 dev_priv->r600_sx_max_export_size = 128; 800 dev_priv->r600_sx_max_export_pos_size = 16; 801 dev_priv->r600_sx_max_export_smx_size = 128; 802 dev_priv->r600_sq_num_cf_insts = 1; 803 break; 804 case CHIP_RV670: 805 dev_priv->r600_max_pipes = 4; 806 dev_priv->r600_max_tile_pipes = 4; 807 dev_priv->r600_max_simds = 4; 808 dev_priv->r600_max_backends = 4; 809 dev_priv->r600_max_gprs = 192; 810 dev_priv->r600_max_threads = 192; 811 dev_priv->r600_max_stack_entries = 256; 812 dev_priv->r600_max_hw_contexts = 8; 813 dev_priv->r600_max_gs_threads = 16; 814 dev_priv->r600_sx_max_export_size = 128; 815 dev_priv->r600_sx_max_export_pos_size = 16; 816 dev_priv->r600_sx_max_export_smx_size = 128; 817 dev_priv->r600_sq_num_cf_insts = 2; 818 break; 819 default: 820 break; 821 } 822 823 /* Initialize HDP */ 824 j = 0; 825 for (i = 0; i < 32; i++) { 826 RADEON_WRITE((0x2c14 + j), 0x00000000); 827 RADEON_WRITE((0x2c18 + j), 0x00000000); 828 RADEON_WRITE((0x2c1c + j), 0x00000000); 829 RADEON_WRITE((0x2c20 + j), 0x00000000); 830 RADEON_WRITE((0x2c24 + j), 0x00000000); 831 j += 0x18; 832 } 833 834 RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff)); 835 836 /* setup tiling, simd, pipe config */ 837 ramcfg = RADEON_READ(R600_RAMCFG); 838 839 switch (dev_priv->r600_max_tile_pipes) { 840 case 1: 841 gb_tiling_config |= R600_PIPE_TILING(0); 842 break; 843 case 2: 844 gb_tiling_config |= R600_PIPE_TILING(1); 845 break; 846 case 4: 847 gb_tiling_config |= R600_PIPE_TILING(2); 848 break; 849 case 8: 850 gb_tiling_config |= R600_PIPE_TILING(3); 851 break; 852 default: 853 break; 854 } 855 856 gb_tiling_config |= R600_BANK_TILING((ramcfg >> R600_NOOFBANK_SHIFT) & R600_NOOFBANK_MASK); 857 858 gb_tiling_config |= R600_GROUP_SIZE(0); 859 860 if (((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK) > 3) { 861 gb_tiling_config |= R600_ROW_TILING(3); 862 gb_tiling_config |= R600_SAMPLE_SPLIT(3); 863 } else { 864 gb_tiling_config |= 865 R600_ROW_TILING(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK)); 866 gb_tiling_config |= 867 R600_SAMPLE_SPLIT(((ramcfg >> R600_NOOFROWS_SHIFT) & R600_NOOFROWS_MASK)); 868 } 869 870 gb_tiling_config |= R600_BANK_SWAPS(1); 871 872 cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000; 873 cc_rb_backend_disable |= 874 R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK); 875 876 cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; 877 cc_gc_shader_pipe_config |= 878 R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK); 879 cc_gc_shader_pipe_config |= 880 R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK); 881 882 backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, 883 (R6XX_MAX_BACKENDS - 884 r600_count_pipe_bits((cc_rb_backend_disable & 885 R6XX_MAX_BACKENDS_MASK) >> 16)), 886 (cc_rb_backend_disable >> 16)); 887 gb_tiling_config |= R600_BACKEND_MAP(backend_map); 888 889 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); 890 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 891 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 892 if (gb_tiling_config & 0xc0) { 893 dev_priv->r600_group_size = 512; 894 } else { 895 dev_priv->r600_group_size = 256; 896 } 897 dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7); 898 if (gb_tiling_config & 0x30) { 899 dev_priv->r600_nbanks = 8; 900 } else { 901 dev_priv->r600_nbanks = 4; 902 } 903 904 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 905 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 906 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 907 908 num_qd_pipes = 909 R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); 910 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); 911 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); 912 913 /* set HW defaults for 3D engine */ 914 RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) | 915 R600_ROQ_IB2_START(0x2b))); 916 917 RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, (R600_MEQ_END(0x40) | 918 R600_ROQ_END(0x40))); 919 920 RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO | 921 R600_SYNC_GRADIENT | 922 R600_SYNC_WALKER | 923 R600_SYNC_ALIGNER)); 924 925 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670) 926 RADEON_WRITE(R600_ARB_GDEC_RD_CNTL, 0x00000021); 927 928 sx_debug_1 = RADEON_READ(R600_SX_DEBUG_1); 929 sx_debug_1 |= R600_SMX_EVENT_RELEASE; 930 if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600)) 931 sx_debug_1 |= R600_ENABLE_NEW_SMX_ADDRESS; 932 RADEON_WRITE(R600_SX_DEBUG_1, sx_debug_1); 933 934 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) || 935 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) || 936 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || 937 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || 938 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || 939 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) 940 RADEON_WRITE(R600_DB_DEBUG, R600_PREZ_MUST_WAIT_FOR_POSTZ_DONE); 941 else 942 RADEON_WRITE(R600_DB_DEBUG, 0); 943 944 RADEON_WRITE(R600_DB_WATERMARKS, (R600_DEPTH_FREE(4) | 945 R600_DEPTH_FLUSH(16) | 946 R600_DEPTH_PENDING_FREE(4) | 947 R600_DEPTH_CACHELINE_FREE(16))); 948 RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0); 949 RADEON_WRITE(R600_VGT_NUM_INSTANCES, 0); 950 951 RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0)); 952 RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(0)); 953 954 sq_ms_fifo_sizes = RADEON_READ(R600_SQ_MS_FIFO_SIZES); 955 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || 956 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || 957 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || 958 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { 959 sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(0xa) | 960 R600_FETCH_FIFO_HIWATER(0xa) | 961 R600_DONE_FIFO_HIWATER(0xe0) | 962 R600_ALU_UPDATE_FIFO_HIWATER(0x8)); 963 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) || 964 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630)) { 965 sq_ms_fifo_sizes &= ~R600_DONE_FIFO_HIWATER(0xff); 966 sq_ms_fifo_sizes |= R600_DONE_FIFO_HIWATER(0x4); 967 } 968 RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes); 969 970 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT 971 * should be adjusted as needed by the 2D/3D drivers. This just sets default values 972 */ 973 sq_config = RADEON_READ(R600_SQ_CONFIG); 974 sq_config &= ~(R600_PS_PRIO(3) | 975 R600_VS_PRIO(3) | 976 R600_GS_PRIO(3) | 977 R600_ES_PRIO(3)); 978 sq_config |= (R600_DX9_CONSTS | 979 R600_VC_ENABLE | 980 R600_PS_PRIO(0) | 981 R600_VS_PRIO(1) | 982 R600_GS_PRIO(2) | 983 R600_ES_PRIO(3)); 984 985 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R600) { 986 sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(124) | 987 R600_NUM_VS_GPRS(124) | 988 R600_NUM_CLAUSE_TEMP_GPRS(4)); 989 sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(0) | 990 R600_NUM_ES_GPRS(0)); 991 sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(136) | 992 R600_NUM_VS_THREADS(48) | 993 R600_NUM_GS_THREADS(4) | 994 R600_NUM_ES_THREADS(4)); 995 sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(128) | 996 R600_NUM_VS_STACK_ENTRIES(128)); 997 sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(0) | 998 R600_NUM_ES_STACK_ENTRIES(0)); 999 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || 1000 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || 1001 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || 1002 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) { 1003 /* no vertex cache */ 1004 sq_config &= ~R600_VC_ENABLE; 1005 1006 sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) | 1007 R600_NUM_VS_GPRS(44) | 1008 R600_NUM_CLAUSE_TEMP_GPRS(2)); 1009 sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) | 1010 R600_NUM_ES_GPRS(17)); 1011 sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) | 1012 R600_NUM_VS_THREADS(78) | 1013 R600_NUM_GS_THREADS(4) | 1014 R600_NUM_ES_THREADS(31)); 1015 sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) | 1016 R600_NUM_VS_STACK_ENTRIES(40)); 1017 sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) | 1018 R600_NUM_ES_STACK_ENTRIES(16)); 1019 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV630) || 1020 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV635)) { 1021 sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) | 1022 R600_NUM_VS_GPRS(44) | 1023 R600_NUM_CLAUSE_TEMP_GPRS(2)); 1024 sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(18) | 1025 R600_NUM_ES_GPRS(18)); 1026 sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) | 1027 R600_NUM_VS_THREADS(78) | 1028 R600_NUM_GS_THREADS(4) | 1029 R600_NUM_ES_THREADS(31)); 1030 sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(40) | 1031 R600_NUM_VS_STACK_ENTRIES(40)); 1032 sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(32) | 1033 R600_NUM_ES_STACK_ENTRIES(16)); 1034 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV670) { 1035 sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(44) | 1036 R600_NUM_VS_GPRS(44) | 1037 R600_NUM_CLAUSE_TEMP_GPRS(2)); 1038 sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(17) | 1039 R600_NUM_ES_GPRS(17)); 1040 sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(79) | 1041 R600_NUM_VS_THREADS(78) | 1042 R600_NUM_GS_THREADS(4) | 1043 R600_NUM_ES_THREADS(31)); 1044 sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(64) | 1045 R600_NUM_VS_STACK_ENTRIES(64)); 1046 sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(64) | 1047 R600_NUM_ES_STACK_ENTRIES(64)); 1048 } 1049 1050 RADEON_WRITE(R600_SQ_CONFIG, sq_config); 1051 RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); 1052 RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); 1053 RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); 1054 RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); 1055 RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); 1056 1057 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) || 1058 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) || 1059 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) || 1060 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880)) 1061 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_TC_ONLY)); 1062 else 1063 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, R600_CACHE_INVALIDATION(R600_VC_AND_TC)); 1064 1065 RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_2S, (R600_S0_X(0xc) | 1066 R600_S0_Y(0x4) | 1067 R600_S1_X(0x4) | 1068 R600_S1_Y(0xc))); 1069 RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_4S, (R600_S0_X(0xe) | 1070 R600_S0_Y(0xe) | 1071 R600_S1_X(0x2) | 1072 R600_S1_Y(0x2) | 1073 R600_S2_X(0xa) | 1074 R600_S2_Y(0x6) | 1075 R600_S3_X(0x6) | 1076 R600_S3_Y(0xa))); 1077 RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD0, (R600_S0_X(0xe) | 1078 R600_S0_Y(0xb) | 1079 R600_S1_X(0x4) | 1080 R600_S1_Y(0xc) | 1081 R600_S2_X(0x1) | 1082 R600_S2_Y(0x6) | 1083 R600_S3_X(0xa) | 1084 R600_S3_Y(0xe))); 1085 RADEON_WRITE(R600_PA_SC_AA_SAMPLE_LOCS_8S_WD1, (R600_S4_X(0x6) | 1086 R600_S4_Y(0x1) | 1087 R600_S5_X(0x0) | 1088 R600_S5_Y(0x0) | 1089 R600_S6_X(0xb) | 1090 R600_S6_Y(0x4) | 1091 R600_S7_X(0x7) | 1092 R600_S7_Y(0x8))); 1093 1094 1095 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1096 case CHIP_R600: 1097 case CHIP_RV630: 1098 case CHIP_RV635: 1099 gs_prim_buffer_depth = 0; 1100 break; 1101 case CHIP_RV610: 1102 case CHIP_RS780: 1103 case CHIP_RS880: 1104 case CHIP_RV620: 1105 gs_prim_buffer_depth = 32; 1106 break; 1107 case CHIP_RV670: 1108 gs_prim_buffer_depth = 128; 1109 break; 1110 default: 1111 break; 1112 } 1113 1114 num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16; 1115 vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread; 1116 /* Max value for this is 256 */ 1117 if (vgt_gs_per_es > 256) 1118 vgt_gs_per_es = 256; 1119 1120 RADEON_WRITE(R600_VGT_ES_PER_GS, 128); 1121 RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es); 1122 RADEON_WRITE(R600_VGT_GS_PER_VS, 2); 1123 RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16); 1124 1125 /* more default values. 2D/3D driver should adjust as needed */ 1126 RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0); 1127 RADEON_WRITE(R600_VGT_STRMOUT_EN, 0); 1128 RADEON_WRITE(R600_SX_MISC, 0); 1129 RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0); 1130 RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0); 1131 RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0); 1132 RADEON_WRITE(R600_SPI_INPUT_Z, 0); 1133 RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2)); 1134 RADEON_WRITE(R600_CB_COLOR7_FRAG, 0); 1135 1136 /* clear render buffer base addresses */ 1137 RADEON_WRITE(R600_CB_COLOR0_BASE, 0); 1138 RADEON_WRITE(R600_CB_COLOR1_BASE, 0); 1139 RADEON_WRITE(R600_CB_COLOR2_BASE, 0); 1140 RADEON_WRITE(R600_CB_COLOR3_BASE, 0); 1141 RADEON_WRITE(R600_CB_COLOR4_BASE, 0); 1142 RADEON_WRITE(R600_CB_COLOR5_BASE, 0); 1143 RADEON_WRITE(R600_CB_COLOR6_BASE, 0); 1144 RADEON_WRITE(R600_CB_COLOR7_BASE, 0); 1145 1146 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1147 case CHIP_RV610: 1148 case CHIP_RS780: 1149 case CHIP_RS880: 1150 case CHIP_RV620: 1151 tc_cntl = R600_TC_L2_SIZE(8); 1152 break; 1153 case CHIP_RV630: 1154 case CHIP_RV635: 1155 tc_cntl = R600_TC_L2_SIZE(4); 1156 break; 1157 case CHIP_R600: 1158 tc_cntl = R600_TC_L2_SIZE(0) | R600_L2_DISABLE_LATE_HIT; 1159 break; 1160 default: 1161 tc_cntl = R600_TC_L2_SIZE(0); 1162 break; 1163 } 1164 1165 RADEON_WRITE(R600_TC_CNTL, tc_cntl); 1166 1167 hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL); 1168 RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 1169 1170 arb_pop = RADEON_READ(R600_ARB_POP); 1171 arb_pop |= R600_ENABLE_TC128; 1172 RADEON_WRITE(R600_ARB_POP, arb_pop); 1173 1174 RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0); 1175 RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA | 1176 R600_NUM_CLIP_SEQ(3))); 1177 RADEON_WRITE(R600_PA_SC_ENHANCE, R600_FORCE_EOV_MAX_CLK_CNT(4095)); 1178 1179} 1180 1181static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv, 1182 u32 num_tile_pipes, 1183 u32 num_backends, 1184 u32 backend_disable_mask) 1185{ 1186 u32 backend_map = 0; 1187 u32 enabled_backends_mask; 1188 u32 enabled_backends_count; 1189 u32 cur_pipe; 1190 u32 swizzle_pipe[R7XX_MAX_PIPES]; 1191 u32 cur_backend; 1192 u32 i; 1193 bool force_no_swizzle; 1194 1195 if (num_tile_pipes > R7XX_MAX_PIPES) 1196 num_tile_pipes = R7XX_MAX_PIPES; 1197 if (num_tile_pipes < 1) 1198 num_tile_pipes = 1; 1199 if (num_backends > R7XX_MAX_BACKENDS) 1200 num_backends = R7XX_MAX_BACKENDS; 1201 if (num_backends < 1) 1202 num_backends = 1; 1203 1204 enabled_backends_mask = 0; 1205 enabled_backends_count = 0; 1206 for (i = 0; i < R7XX_MAX_BACKENDS; ++i) { 1207 if (((backend_disable_mask >> i) & 1) == 0) { 1208 enabled_backends_mask |= (1 << i); 1209 ++enabled_backends_count; 1210 } 1211 if (enabled_backends_count == num_backends) 1212 break; 1213 } 1214 1215 if (enabled_backends_count == 0) { 1216 enabled_backends_mask = 1; 1217 enabled_backends_count = 1; 1218 } 1219 1220 if (enabled_backends_count != num_backends) 1221 num_backends = enabled_backends_count; 1222 1223 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1224 case CHIP_RV770: 1225 case CHIP_RV730: 1226 force_no_swizzle = false; 1227 break; 1228 case CHIP_RV710: 1229 case CHIP_RV740: 1230 default: 1231 force_no_swizzle = true; 1232 break; 1233 } 1234 1235 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); 1236 switch (num_tile_pipes) { 1237 case 1: 1238 swizzle_pipe[0] = 0; 1239 break; 1240 case 2: 1241 swizzle_pipe[0] = 0; 1242 swizzle_pipe[1] = 1; 1243 break; 1244 case 3: 1245 if (force_no_swizzle) { 1246 swizzle_pipe[0] = 0; 1247 swizzle_pipe[1] = 1; 1248 swizzle_pipe[2] = 2; 1249 } else { 1250 swizzle_pipe[0] = 0; 1251 swizzle_pipe[1] = 2; 1252 swizzle_pipe[2] = 1; 1253 } 1254 break; 1255 case 4: 1256 if (force_no_swizzle) { 1257 swizzle_pipe[0] = 0; 1258 swizzle_pipe[1] = 1; 1259 swizzle_pipe[2] = 2; 1260 swizzle_pipe[3] = 3; 1261 } else { 1262 swizzle_pipe[0] = 0; 1263 swizzle_pipe[1] = 2; 1264 swizzle_pipe[2] = 3; 1265 swizzle_pipe[3] = 1; 1266 } 1267 break; 1268 case 5: 1269 if (force_no_swizzle) { 1270 swizzle_pipe[0] = 0; 1271 swizzle_pipe[1] = 1; 1272 swizzle_pipe[2] = 2; 1273 swizzle_pipe[3] = 3; 1274 swizzle_pipe[4] = 4; 1275 } else { 1276 swizzle_pipe[0] = 0; 1277 swizzle_pipe[1] = 2; 1278 swizzle_pipe[2] = 4; 1279 swizzle_pipe[3] = 1; 1280 swizzle_pipe[4] = 3; 1281 } 1282 break; 1283 case 6: 1284 if (force_no_swizzle) { 1285 swizzle_pipe[0] = 0; 1286 swizzle_pipe[1] = 1; 1287 swizzle_pipe[2] = 2; 1288 swizzle_pipe[3] = 3; 1289 swizzle_pipe[4] = 4; 1290 swizzle_pipe[5] = 5; 1291 } else { 1292 swizzle_pipe[0] = 0; 1293 swizzle_pipe[1] = 2; 1294 swizzle_pipe[2] = 4; 1295 swizzle_pipe[3] = 5; 1296 swizzle_pipe[4] = 3; 1297 swizzle_pipe[5] = 1; 1298 } 1299 break; 1300 case 7: 1301 if (force_no_swizzle) { 1302 swizzle_pipe[0] = 0; 1303 swizzle_pipe[1] = 1; 1304 swizzle_pipe[2] = 2; 1305 swizzle_pipe[3] = 3; 1306 swizzle_pipe[4] = 4; 1307 swizzle_pipe[5] = 5; 1308 swizzle_pipe[6] = 6; 1309 } else { 1310 swizzle_pipe[0] = 0; 1311 swizzle_pipe[1] = 2; 1312 swizzle_pipe[2] = 4; 1313 swizzle_pipe[3] = 6; 1314 swizzle_pipe[4] = 3; 1315 swizzle_pipe[5] = 1; 1316 swizzle_pipe[6] = 5; 1317 } 1318 break; 1319 case 8: 1320 if (force_no_swizzle) { 1321 swizzle_pipe[0] = 0; 1322 swizzle_pipe[1] = 1; 1323 swizzle_pipe[2] = 2; 1324 swizzle_pipe[3] = 3; 1325 swizzle_pipe[4] = 4; 1326 swizzle_pipe[5] = 5; 1327 swizzle_pipe[6] = 6; 1328 swizzle_pipe[7] = 7; 1329 } else { 1330 swizzle_pipe[0] = 0; 1331 swizzle_pipe[1] = 2; 1332 swizzle_pipe[2] = 4; 1333 swizzle_pipe[3] = 6; 1334 swizzle_pipe[4] = 3; 1335 swizzle_pipe[5] = 1; 1336 swizzle_pipe[6] = 7; 1337 swizzle_pipe[7] = 5; 1338 } 1339 break; 1340 } 1341 1342 cur_backend = 0; 1343 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { 1344 while (((1 << cur_backend) & enabled_backends_mask) == 0) 1345 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; 1346 1347 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); 1348 1349 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; 1350 } 1351 1352 return backend_map; 1353} 1354 1355static void r700_gfx_init(struct drm_device *dev, 1356 drm_radeon_private_t *dev_priv) 1357{ 1358 int i, j, num_qd_pipes; 1359 u32 ta_aux_cntl; 1360 u32 sx_debug_1; 1361 u32 smx_dc_ctl0; 1362 u32 db_debug3; 1363 u32 num_gs_verts_per_thread; 1364 u32 vgt_gs_per_es; 1365 u32 gs_prim_buffer_depth = 0; 1366 u32 sq_ms_fifo_sizes; 1367 u32 sq_config; 1368 u32 sq_thread_resource_mgmt; 1369 u32 hdp_host_path_cntl; 1370 u32 sq_dyn_gpr_size_simd_ab_0; 1371 u32 backend_map; 1372 u32 gb_tiling_config = 0; 1373 u32 cc_rb_backend_disable; 1374 u32 cc_gc_shader_pipe_config; 1375 u32 mc_arb_ramcfg; 1376 u32 db_debug4; 1377 1378 /* setup chip specs */ 1379 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1380 case CHIP_RV770: 1381 dev_priv->r600_max_pipes = 4; 1382 dev_priv->r600_max_tile_pipes = 8; 1383 dev_priv->r600_max_simds = 10; 1384 dev_priv->r600_max_backends = 4; 1385 dev_priv->r600_max_gprs = 256; 1386 dev_priv->r600_max_threads = 248; 1387 dev_priv->r600_max_stack_entries = 512; 1388 dev_priv->r600_max_hw_contexts = 8; 1389 dev_priv->r600_max_gs_threads = 16 * 2; 1390 dev_priv->r600_sx_max_export_size = 128; 1391 dev_priv->r600_sx_max_export_pos_size = 16; 1392 dev_priv->r600_sx_max_export_smx_size = 112; 1393 dev_priv->r600_sq_num_cf_insts = 2; 1394 1395 dev_priv->r700_sx_num_of_sets = 7; 1396 dev_priv->r700_sc_prim_fifo_size = 0xF9; 1397 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1398 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1399 break; 1400 case CHIP_RV730: 1401 dev_priv->r600_max_pipes = 2; 1402 dev_priv->r600_max_tile_pipes = 4; 1403 dev_priv->r600_max_simds = 8; 1404 dev_priv->r600_max_backends = 2; 1405 dev_priv->r600_max_gprs = 128; 1406 dev_priv->r600_max_threads = 248; 1407 dev_priv->r600_max_stack_entries = 256; 1408 dev_priv->r600_max_hw_contexts = 8; 1409 dev_priv->r600_max_gs_threads = 16 * 2; 1410 dev_priv->r600_sx_max_export_size = 256; 1411 dev_priv->r600_sx_max_export_pos_size = 32; 1412 dev_priv->r600_sx_max_export_smx_size = 224; 1413 dev_priv->r600_sq_num_cf_insts = 2; 1414 1415 dev_priv->r700_sx_num_of_sets = 7; 1416 dev_priv->r700_sc_prim_fifo_size = 0xf9; 1417 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1418 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1419 if (dev_priv->r600_sx_max_export_pos_size > 16) { 1420 dev_priv->r600_sx_max_export_pos_size -= 16; 1421 dev_priv->r600_sx_max_export_smx_size += 16; 1422 } 1423 break; 1424 case CHIP_RV710: 1425 dev_priv->r600_max_pipes = 2; 1426 dev_priv->r600_max_tile_pipes = 2; 1427 dev_priv->r600_max_simds = 2; 1428 dev_priv->r600_max_backends = 1; 1429 dev_priv->r600_max_gprs = 256; 1430 dev_priv->r600_max_threads = 192; 1431 dev_priv->r600_max_stack_entries = 256; 1432 dev_priv->r600_max_hw_contexts = 4; 1433 dev_priv->r600_max_gs_threads = 8 * 2; 1434 dev_priv->r600_sx_max_export_size = 128; 1435 dev_priv->r600_sx_max_export_pos_size = 16; 1436 dev_priv->r600_sx_max_export_smx_size = 112; 1437 dev_priv->r600_sq_num_cf_insts = 1; 1438 1439 dev_priv->r700_sx_num_of_sets = 7; 1440 dev_priv->r700_sc_prim_fifo_size = 0x40; 1441 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1442 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1443 break; 1444 case CHIP_RV740: 1445 dev_priv->r600_max_pipes = 4; 1446 dev_priv->r600_max_tile_pipes = 4; 1447 dev_priv->r600_max_simds = 8; 1448 dev_priv->r600_max_backends = 4; 1449 dev_priv->r600_max_gprs = 256; 1450 dev_priv->r600_max_threads = 248; 1451 dev_priv->r600_max_stack_entries = 512; 1452 dev_priv->r600_max_hw_contexts = 8; 1453 dev_priv->r600_max_gs_threads = 16 * 2; 1454 dev_priv->r600_sx_max_export_size = 256; 1455 dev_priv->r600_sx_max_export_pos_size = 32; 1456 dev_priv->r600_sx_max_export_smx_size = 224; 1457 dev_priv->r600_sq_num_cf_insts = 2; 1458 1459 dev_priv->r700_sx_num_of_sets = 7; 1460 dev_priv->r700_sc_prim_fifo_size = 0x100; 1461 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1462 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1463 1464 if (dev_priv->r600_sx_max_export_pos_size > 16) { 1465 dev_priv->r600_sx_max_export_pos_size -= 16; 1466 dev_priv->r600_sx_max_export_smx_size += 16; 1467 } 1468 break; 1469 default: 1470 break; 1471 } 1472 1473 /* Initialize HDP */ 1474 j = 0; 1475 for (i = 0; i < 32; i++) { 1476 RADEON_WRITE((0x2c14 + j), 0x00000000); 1477 RADEON_WRITE((0x2c18 + j), 0x00000000); 1478 RADEON_WRITE((0x2c1c + j), 0x00000000); 1479 RADEON_WRITE((0x2c20 + j), 0x00000000); 1480 RADEON_WRITE((0x2c24 + j), 0x00000000); 1481 j += 0x18; 1482 } 1483 1484 RADEON_WRITE(R600_GRBM_CNTL, R600_GRBM_READ_TIMEOUT(0xff)); 1485 1486 /* setup tiling, simd, pipe config */ 1487 mc_arb_ramcfg = RADEON_READ(R700_MC_ARB_RAMCFG); 1488 1489 switch (dev_priv->r600_max_tile_pipes) { 1490 case 1: 1491 gb_tiling_config |= R600_PIPE_TILING(0); 1492 break; 1493 case 2: 1494 gb_tiling_config |= R600_PIPE_TILING(1); 1495 break; 1496 case 4: 1497 gb_tiling_config |= R600_PIPE_TILING(2); 1498 break; 1499 case 8: 1500 gb_tiling_config |= R600_PIPE_TILING(3); 1501 break; 1502 default: 1503 break; 1504 } 1505 1506 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770) 1507 gb_tiling_config |= R600_BANK_TILING(1); 1508 else 1509 gb_tiling_config |= R600_BANK_TILING((mc_arb_ramcfg >> R700_NOOFBANK_SHIFT) & R700_NOOFBANK_MASK); 1510 1511 gb_tiling_config |= R600_GROUP_SIZE(0); 1512 1513 if (((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK) > 3) { 1514 gb_tiling_config |= R600_ROW_TILING(3); 1515 gb_tiling_config |= R600_SAMPLE_SPLIT(3); 1516 } else { 1517 gb_tiling_config |= 1518 R600_ROW_TILING(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK)); 1519 gb_tiling_config |= 1520 R600_SAMPLE_SPLIT(((mc_arb_ramcfg >> R700_NOOFROWS_SHIFT) & R700_NOOFROWS_MASK)); 1521 } 1522 1523 gb_tiling_config |= R600_BANK_SWAPS(1); 1524 1525 cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000; 1526 cc_rb_backend_disable |= 1527 R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK); 1528 1529 cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; 1530 cc_gc_shader_pipe_config |= 1531 R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK); 1532 cc_gc_shader_pipe_config |= 1533 R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK); 1534 1535 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) 1536 backend_map = 0x28; 1537 else 1538 backend_map = r700_get_tile_pipe_to_backend_map(dev_priv, 1539 dev_priv->r600_max_tile_pipes, 1540 (R7XX_MAX_BACKENDS - 1541 r600_count_pipe_bits((cc_rb_backend_disable & 1542 R7XX_MAX_BACKENDS_MASK) >> 16)), 1543 (cc_rb_backend_disable >> 16)); 1544 gb_tiling_config |= R600_BACKEND_MAP(backend_map); 1545 1546 RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); 1547 RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1548 RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 1549 if (gb_tiling_config & 0xc0) { 1550 dev_priv->r600_group_size = 512; 1551 } else { 1552 dev_priv->r600_group_size = 256; 1553 } 1554 dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7); 1555 if (gb_tiling_config & 0x30) { 1556 dev_priv->r600_nbanks = 8; 1557 } else { 1558 dev_priv->r600_nbanks = 4; 1559 } 1560 1561 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1562 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 1563 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 1564 1565 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1566 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); 1567 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); 1568 RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0); 1569 RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0); 1570 1571 num_qd_pipes = 1572 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); 1573 RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); 1574 RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); 1575 1576 /* set HW defaults for 3D engine */ 1577 RADEON_WRITE(R600_CP_QUEUE_THRESHOLDS, (R600_ROQ_IB1_START(0x16) | 1578 R600_ROQ_IB2_START(0x2b))); 1579 1580 RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30)); 1581 1582 ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX); 1583 RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO); 1584 1585 sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1); 1586 sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS; 1587 RADEON_WRITE(R700_SX_DEBUG_1, sx_debug_1); 1588 1589 smx_dc_ctl0 = RADEON_READ(R600_SMX_DC_CTL0); 1590 smx_dc_ctl0 &= ~R700_CACHE_DEPTH(0x1ff); 1591 smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1); 1592 RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0); 1593 1594 if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740) 1595 RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) | 1596 R700_GS_FLUSH_CTL(4) | 1597 R700_ACK_FLUSH_CTL(3) | 1598 R700_SYNC_FLUSH_CTL)); 1599 1600 db_debug3 = RADEON_READ(R700_DB_DEBUG3); 1601 db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f); 1602 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1603 case CHIP_RV770: 1604 case CHIP_RV740: 1605 db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f); 1606 break; 1607 case CHIP_RV710: 1608 case CHIP_RV730: 1609 default: 1610 db_debug3 |= R700_DB_CLK_OFF_DELAY(2); 1611 break; 1612 } 1613 RADEON_WRITE(R700_DB_DEBUG3, db_debug3); 1614 1615 if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) { 1616 db_debug4 = RADEON_READ(RV700_DB_DEBUG4); 1617 db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER; 1618 RADEON_WRITE(RV700_DB_DEBUG4, db_debug4); 1619 } 1620 1621 RADEON_WRITE(R600_SX_EXPORT_BUFFER_SIZES, (R600_COLOR_BUFFER_SIZE((dev_priv->r600_sx_max_export_size / 4) - 1) | 1622 R600_POSITION_BUFFER_SIZE((dev_priv->r600_sx_max_export_pos_size / 4) - 1) | 1623 R600_SMX_BUFFER_SIZE((dev_priv->r600_sx_max_export_smx_size / 4) - 1))); 1624 1625 RADEON_WRITE(R700_PA_SC_FIFO_SIZE_R7XX, (R700_SC_PRIM_FIFO_SIZE(dev_priv->r700_sc_prim_fifo_size) | 1626 R700_SC_HIZ_TILE_FIFO_SIZE(dev_priv->r700_sc_hiz_tile_fifo_size) | 1627 R700_SC_EARLYZ_TILE_FIFO_SIZE(dev_priv->r700_sc_earlyz_tile_fifo_fize))); 1628 1629 RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0); 1630 1631 RADEON_WRITE(R600_VGT_NUM_INSTANCES, 1); 1632 1633 RADEON_WRITE(R600_SPI_CONFIG_CNTL, R600_GPR_WRITE_PRIORITY(0)); 1634 1635 RADEON_WRITE(R600_SPI_CONFIG_CNTL_1, R600_VTX_DONE_DELAY(4)); 1636 1637 RADEON_WRITE(R600_CP_PERFMON_CNTL, 0); 1638 1639 sq_ms_fifo_sizes = (R600_CACHE_FIFO_SIZE(16 * dev_priv->r600_sq_num_cf_insts) | 1640 R600_DONE_FIFO_HIWATER(0xe0) | 1641 R600_ALU_UPDATE_FIFO_HIWATER(0x8)); 1642 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1643 case CHIP_RV770: 1644 case CHIP_RV730: 1645 case CHIP_RV710: 1646 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1); 1647 break; 1648 case CHIP_RV740: 1649 default: 1650 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4); 1651 break; 1652 } 1653 RADEON_WRITE(R600_SQ_MS_FIFO_SIZES, sq_ms_fifo_sizes); 1654 1655 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT 1656 * should be adjusted as needed by the 2D/3D drivers. This just sets default values 1657 */ 1658 sq_config = RADEON_READ(R600_SQ_CONFIG); 1659 sq_config &= ~(R600_PS_PRIO(3) | 1660 R600_VS_PRIO(3) | 1661 R600_GS_PRIO(3) | 1662 R600_ES_PRIO(3)); 1663 sq_config |= (R600_DX9_CONSTS | 1664 R600_VC_ENABLE | 1665 R600_EXPORT_SRC_C | 1666 R600_PS_PRIO(0) | 1667 R600_VS_PRIO(1) | 1668 R600_GS_PRIO(2) | 1669 R600_ES_PRIO(3)); 1670 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710) 1671 /* no vertex cache */ 1672 sq_config &= ~R600_VC_ENABLE; 1673 1674 RADEON_WRITE(R600_SQ_CONFIG, sq_config); 1675 1676 RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_1, (R600_NUM_PS_GPRS((dev_priv->r600_max_gprs * 24)/64) | 1677 R600_NUM_VS_GPRS((dev_priv->r600_max_gprs * 24)/64) | 1678 R600_NUM_CLAUSE_TEMP_GPRS(((dev_priv->r600_max_gprs * 24)/64)/2))); 1679 1680 RADEON_WRITE(R600_SQ_GPR_RESOURCE_MGMT_2, (R600_NUM_GS_GPRS((dev_priv->r600_max_gprs * 7)/64) | 1681 R600_NUM_ES_GPRS((dev_priv->r600_max_gprs * 7)/64))); 1682 1683 sq_thread_resource_mgmt = (R600_NUM_PS_THREADS((dev_priv->r600_max_threads * 4)/8) | 1684 R600_NUM_VS_THREADS((dev_priv->r600_max_threads * 2)/8) | 1685 R600_NUM_ES_THREADS((dev_priv->r600_max_threads * 1)/8)); 1686 if (((dev_priv->r600_max_threads * 1) / 8) > dev_priv->r600_max_gs_threads) 1687 sq_thread_resource_mgmt |= R600_NUM_GS_THREADS(dev_priv->r600_max_gs_threads); 1688 else 1689 sq_thread_resource_mgmt |= R600_NUM_GS_THREADS((dev_priv->r600_max_gs_threads * 1)/8); 1690 RADEON_WRITE(R600_SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); 1691 1692 RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_1, (R600_NUM_PS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) | 1693 R600_NUM_VS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4))); 1694 1695 RADEON_WRITE(R600_SQ_STACK_RESOURCE_MGMT_2, (R600_NUM_GS_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4) | 1696 R600_NUM_ES_STACK_ENTRIES((dev_priv->r600_max_stack_entries * 1)/4))); 1697 1698 sq_dyn_gpr_size_simd_ab_0 = (R700_SIMDA_RING0((dev_priv->r600_max_gprs * 38)/64) | 1699 R700_SIMDA_RING1((dev_priv->r600_max_gprs * 38)/64) | 1700 R700_SIMDB_RING0((dev_priv->r600_max_gprs * 38)/64) | 1701 R700_SIMDB_RING1((dev_priv->r600_max_gprs * 38)/64)); 1702 1703 RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_0, sq_dyn_gpr_size_simd_ab_0); 1704 RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_1, sq_dyn_gpr_size_simd_ab_0); 1705 RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_2, sq_dyn_gpr_size_simd_ab_0); 1706 RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_3, sq_dyn_gpr_size_simd_ab_0); 1707 RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_4, sq_dyn_gpr_size_simd_ab_0); 1708 RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_5, sq_dyn_gpr_size_simd_ab_0); 1709 RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_6, sq_dyn_gpr_size_simd_ab_0); 1710 RADEON_WRITE(R700_SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0); 1711 1712 RADEON_WRITE(R700_PA_SC_FORCE_EOV_MAX_CNTS, (R700_FORCE_EOV_MAX_CLK_CNT(4095) | 1713 R700_FORCE_EOV_MAX_REZ_CNT(255))); 1714 1715 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710) 1716 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_TC_ONLY) | 1717 R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO))); 1718 else 1719 RADEON_WRITE(R600_VGT_CACHE_INVALIDATION, (R600_CACHE_INVALIDATION(R600_VC_AND_TC) | 1720 R700_AUTO_INVLD_EN(R700_ES_AND_GS_AUTO))); 1721 1722 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1723 case CHIP_RV770: 1724 case CHIP_RV730: 1725 case CHIP_RV740: 1726 gs_prim_buffer_depth = 384; 1727 break; 1728 case CHIP_RV710: 1729 gs_prim_buffer_depth = 128; 1730 break; 1731 default: 1732 break; 1733 } 1734 1735 num_gs_verts_per_thread = dev_priv->r600_max_pipes * 16; 1736 vgt_gs_per_es = gs_prim_buffer_depth + num_gs_verts_per_thread; 1737 /* Max value for this is 256 */ 1738 if (vgt_gs_per_es > 256) 1739 vgt_gs_per_es = 256; 1740 1741 RADEON_WRITE(R600_VGT_ES_PER_GS, 128); 1742 RADEON_WRITE(R600_VGT_GS_PER_ES, vgt_gs_per_es); 1743 RADEON_WRITE(R600_VGT_GS_PER_VS, 2); 1744 1745 /* more default values. 2D/3D driver should adjust as needed */ 1746 RADEON_WRITE(R600_VGT_GS_VERTEX_REUSE, 16); 1747 RADEON_WRITE(R600_PA_SC_LINE_STIPPLE_STATE, 0); 1748 RADEON_WRITE(R600_VGT_STRMOUT_EN, 0); 1749 RADEON_WRITE(R600_SX_MISC, 0); 1750 RADEON_WRITE(R600_PA_SC_MODE_CNTL, 0); 1751 RADEON_WRITE(R700_PA_SC_EDGERULE, 0xaaaaaaaa); 1752 RADEON_WRITE(R600_PA_SC_AA_CONFIG, 0); 1753 RADEON_WRITE(R600_PA_SC_CLIPRECT_RULE, 0xffff); 1754 RADEON_WRITE(R600_PA_SC_LINE_STIPPLE, 0); 1755 RADEON_WRITE(R600_SPI_INPUT_Z, 0); 1756 RADEON_WRITE(R600_SPI_PS_IN_CONTROL_0, R600_NUM_INTERP(2)); 1757 RADEON_WRITE(R600_CB_COLOR7_FRAG, 0); 1758 1759 /* clear render buffer base addresses */ 1760 RADEON_WRITE(R600_CB_COLOR0_BASE, 0); 1761 RADEON_WRITE(R600_CB_COLOR1_BASE, 0); 1762 RADEON_WRITE(R600_CB_COLOR2_BASE, 0); 1763 RADEON_WRITE(R600_CB_COLOR3_BASE, 0); 1764 RADEON_WRITE(R600_CB_COLOR4_BASE, 0); 1765 RADEON_WRITE(R600_CB_COLOR5_BASE, 0); 1766 RADEON_WRITE(R600_CB_COLOR6_BASE, 0); 1767 RADEON_WRITE(R600_CB_COLOR7_BASE, 0); 1768 1769 RADEON_WRITE(R700_TCP_CNTL, 0); 1770 1771 hdp_host_path_cntl = RADEON_READ(R600_HDP_HOST_PATH_CNTL); 1772 RADEON_WRITE(R600_HDP_HOST_PATH_CNTL, hdp_host_path_cntl); 1773 1774 RADEON_WRITE(R600_PA_SC_MULTI_CHIP_CNTL, 0); 1775 1776 RADEON_WRITE(R600_PA_CL_ENHANCE, (R600_CLIP_VTX_REORDER_ENA | 1777 R600_NUM_CLIP_SEQ(3))); 1778 1779} 1780 1781static void r600_cp_init_ring_buffer(struct drm_device *dev, 1782 drm_radeon_private_t *dev_priv, 1783 struct drm_file *file_priv) 1784{ 1785 struct drm_radeon_master_private *master_priv; 1786 u32 ring_start; 1787 u64 rptr_addr; 1788 1789 if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)) 1790 r700_gfx_init(dev, dev_priv); 1791 else 1792 r600_gfx_init(dev, dev_priv); 1793 1794 RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); 1795 RADEON_READ(R600_GRBM_SOFT_RESET); 1796 mdelay(15); 1797 RADEON_WRITE(R600_GRBM_SOFT_RESET, 0); 1798 1799 1800 /* Set ring buffer size */ 1801#ifdef __BIG_ENDIAN 1802 RADEON_WRITE(R600_CP_RB_CNTL, 1803 R600_BUF_SWAP_32BIT | 1804 R600_RB_NO_UPDATE | 1805 (dev_priv->ring.rptr_update_l2qw << 8) | 1806 dev_priv->ring.size_l2qw); 1807#else 1808 RADEON_WRITE(R600_CP_RB_CNTL, 1809 RADEON_RB_NO_UPDATE | 1810 (dev_priv->ring.rptr_update_l2qw << 8) | 1811 dev_priv->ring.size_l2qw); 1812#endif 1813 1814 RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0); 1815 1816 /* Set the write pointer delay */ 1817 RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); 1818 1819#ifdef __BIG_ENDIAN 1820 RADEON_WRITE(R600_CP_RB_CNTL, 1821 R600_BUF_SWAP_32BIT | 1822 R600_RB_NO_UPDATE | 1823 R600_RB_RPTR_WR_ENA | 1824 (dev_priv->ring.rptr_update_l2qw << 8) | 1825 dev_priv->ring.size_l2qw); 1826#else 1827 RADEON_WRITE(R600_CP_RB_CNTL, 1828 R600_RB_NO_UPDATE | 1829 R600_RB_RPTR_WR_ENA | 1830 (dev_priv->ring.rptr_update_l2qw << 8) | 1831 dev_priv->ring.size_l2qw); 1832#endif 1833 1834 /* Initialize the ring buffer's read and write pointers */ 1835 RADEON_WRITE(R600_CP_RB_RPTR_WR, 0); 1836 RADEON_WRITE(R600_CP_RB_WPTR, 0); 1837 SET_RING_HEAD(dev_priv, 0); 1838 dev_priv->ring.tail = 0; 1839 1840#if IS_ENABLED(CONFIG_AGP) 1841 if (dev_priv->flags & RADEON_IS_AGP) { 1842 rptr_addr = dev_priv->ring_rptr->offset 1843 - dev->agp->base + 1844 dev_priv->gart_vm_start; 1845 } else 1846#endif 1847 { 1848 rptr_addr = dev_priv->ring_rptr->offset 1849 - ((unsigned long) dev->sg->virtual) 1850 + dev_priv->gart_vm_start; 1851 } 1852 RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc)); 1853 RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, upper_32_bits(rptr_addr)); 1854 1855#ifdef __BIG_ENDIAN 1856 RADEON_WRITE(R600_CP_RB_CNTL, 1857 RADEON_BUF_SWAP_32BIT | 1858 (dev_priv->ring.rptr_update_l2qw << 8) | 1859 dev_priv->ring.size_l2qw); 1860#else 1861 RADEON_WRITE(R600_CP_RB_CNTL, 1862 (dev_priv->ring.rptr_update_l2qw << 8) | 1863 dev_priv->ring.size_l2qw); 1864#endif 1865 1866#if IS_ENABLED(CONFIG_AGP) 1867 if (dev_priv->flags & RADEON_IS_AGP) { 1868 /* XXX */ 1869 radeon_write_agp_base(dev_priv, dev->agp->base); 1870 1871 /* XXX */ 1872 radeon_write_agp_location(dev_priv, 1873 (((dev_priv->gart_vm_start - 1 + 1874 dev_priv->gart_size) & 0xffff0000) | 1875 (dev_priv->gart_vm_start >> 16))); 1876 1877 ring_start = (dev_priv->cp_ring->offset 1878 - dev->agp->base 1879 + dev_priv->gart_vm_start); 1880 } else 1881#endif 1882 ring_start = (dev_priv->cp_ring->offset 1883 - (unsigned long)dev->sg->virtual 1884 + dev_priv->gart_vm_start); 1885 1886 RADEON_WRITE(R600_CP_RB_BASE, ring_start >> 8); 1887 1888 RADEON_WRITE(R600_CP_ME_CNTL, 0xff); 1889 1890 RADEON_WRITE(R600_CP_DEBUG, (1 << 27) | (1 << 28)); 1891 1892 /* Initialize the scratch register pointer. This will cause 1893 * the scratch register values to be written out to memory 1894 * whenever they are updated. 1895 * 1896 * We simply put this behind the ring read pointer, this works 1897 * with PCI GART as well as (whatever kind of) AGP GART 1898 */ 1899 { 1900 u64 scratch_addr; 1901 1902 scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC; 1903 scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; 1904 scratch_addr += R600_SCRATCH_REG_OFFSET; 1905 scratch_addr >>= 8; 1906 scratch_addr &= 0xffffffff; 1907 1908 RADEON_WRITE(R600_SCRATCH_ADDR, (uint32_t)scratch_addr); 1909 } 1910 1911 RADEON_WRITE(R600_SCRATCH_UMSK, 0x7); 1912 1913 /* Turn on bus mastering */ 1914 radeon_enable_bm(dev_priv); 1915 1916 radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(0), 0); 1917 RADEON_WRITE(R600_LAST_FRAME_REG, 0); 1918 1919 radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0); 1920 RADEON_WRITE(R600_LAST_DISPATCH_REG, 0); 1921 1922 radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(2), 0); 1923 RADEON_WRITE(R600_LAST_CLEAR_REG, 0); 1924 1925 /* reset sarea copies of these */ 1926 master_priv = file_priv->master->driver_priv; 1927 if (master_priv->sarea_priv) { 1928 master_priv->sarea_priv->last_frame = 0; 1929 master_priv->sarea_priv->last_dispatch = 0; 1930 master_priv->sarea_priv->last_clear = 0; 1931 } 1932 1933 r600_do_wait_for_idle(dev_priv); 1934 1935} 1936 1937int r600_do_cleanup_cp(struct drm_device *dev) 1938{ 1939 drm_radeon_private_t *dev_priv = dev->dev_private; 1940 DRM_DEBUG("\n"); 1941 1942 /* Make sure interrupts are disabled here because the uninstall ioctl 1943 * may not have been called from userspace and after dev_private 1944 * is freed, it's too late. 1945 */ 1946 if (dev->irq_enabled) 1947 drm_irq_uninstall(dev); 1948 1949#if IS_ENABLED(CONFIG_AGP) 1950 if (dev_priv->flags & RADEON_IS_AGP) { 1951 if (dev_priv->cp_ring != NULL) { 1952 drm_legacy_ioremapfree(dev_priv->cp_ring, dev); 1953 dev_priv->cp_ring = NULL; 1954 } 1955 if (dev_priv->ring_rptr != NULL) { 1956 drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); 1957 dev_priv->ring_rptr = NULL; 1958 } 1959 if (dev->agp_buffer_map != NULL) { 1960 drm_legacy_ioremapfree(dev->agp_buffer_map, dev); 1961 dev->agp_buffer_map = NULL; 1962 } 1963 } else 1964#endif 1965 { 1966 1967 if (dev_priv->gart_info.bus_addr) 1968 r600_page_table_cleanup(dev, &dev_priv->gart_info); 1969 1970 if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) { 1971 drm_legacy_ioremapfree(&dev_priv->gart_info.mapping, dev); 1972 dev_priv->gart_info.addr = NULL; 1973 } 1974 } 1975 /* only clear to the start of flags */ 1976 memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags)); 1977 1978 return 0; 1979} 1980 1981int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, 1982 struct drm_file *file_priv) 1983{ 1984 drm_radeon_private_t *dev_priv = dev->dev_private; 1985 struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; 1986 1987 DRM_DEBUG("\n"); 1988 1989 mutex_init(&dev_priv->cs_mutex); 1990 r600_cs_legacy_init(); 1991 /* if we require new memory map but we don't have it fail */ 1992 if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) { 1993 DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n"); 1994 r600_do_cleanup_cp(dev); 1995 return -EINVAL; 1996 } 1997 1998 if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) { 1999 DRM_DEBUG("Forcing AGP card to PCI mode\n"); 2000 dev_priv->flags &= ~RADEON_IS_AGP; 2001 /* The writeback test succeeds, but when writeback is enabled, 2002 * the ring buffer read ptr update fails after first 128 bytes. 2003 */ 2004 radeon_no_wb = 1; 2005 } else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE)) 2006 && !init->is_pci) { 2007 DRM_DEBUG("Restoring AGP flag\n"); 2008 dev_priv->flags |= RADEON_IS_AGP; 2009 } 2010 2011 dev_priv->usec_timeout = init->usec_timeout; 2012 if (dev_priv->usec_timeout < 1 || 2013 dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) { 2014 DRM_DEBUG("TIMEOUT problem!\n"); 2015 r600_do_cleanup_cp(dev); 2016 return -EINVAL; 2017 } 2018 2019 /* Enable vblank on CRTC1 for older X servers 2020 */ 2021 dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1; 2022 dev_priv->do_boxes = 0; 2023 dev_priv->cp_mode = init->cp_mode; 2024 2025 /* We don't support anything other than bus-mastering ring mode, 2026 * but the ring can be in either AGP or PCI space for the ring 2027 * read pointer. 2028 */ 2029 if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) && 2030 (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) { 2031 DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode); 2032 r600_do_cleanup_cp(dev); 2033 return -EINVAL; 2034 } 2035 2036 switch (init->fb_bpp) { 2037 case 16: 2038 dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565; 2039 break; 2040 case 32: 2041 default: 2042 dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888; 2043 break; 2044 } 2045 dev_priv->front_offset = init->front_offset; 2046 dev_priv->front_pitch = init->front_pitch; 2047 dev_priv->back_offset = init->back_offset; 2048 dev_priv->back_pitch = init->back_pitch; 2049 2050 dev_priv->ring_offset = init->ring_offset; 2051 dev_priv->ring_rptr_offset = init->ring_rptr_offset; 2052 dev_priv->buffers_offset = init->buffers_offset; 2053 dev_priv->gart_textures_offset = init->gart_textures_offset; 2054 2055 master_priv->sarea = drm_legacy_getsarea(dev); 2056 if (!master_priv->sarea) { 2057 DRM_ERROR("could not find sarea!\n"); 2058 r600_do_cleanup_cp(dev); 2059 return -EINVAL; 2060 } 2061 2062 dev_priv->cp_ring = drm_legacy_findmap(dev, init->ring_offset); 2063 if (!dev_priv->cp_ring) { 2064 DRM_ERROR("could not find cp ring region!\n"); 2065 r600_do_cleanup_cp(dev); 2066 return -EINVAL; 2067 } 2068 dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset); 2069 if (!dev_priv->ring_rptr) { 2070 DRM_ERROR("could not find ring read pointer!\n"); 2071 r600_do_cleanup_cp(dev); 2072 return -EINVAL; 2073 } 2074 dev->agp_buffer_token = init->buffers_offset; 2075 dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); 2076 if (!dev->agp_buffer_map) { 2077 DRM_ERROR("could not find dma buffer region!\n"); 2078 r600_do_cleanup_cp(dev); 2079 return -EINVAL; 2080 } 2081 2082 if (init->gart_textures_offset) { 2083 dev_priv->gart_textures = 2084 drm_legacy_findmap(dev, init->gart_textures_offset); 2085 if (!dev_priv->gart_textures) { 2086 DRM_ERROR("could not find GART texture region!\n"); 2087 r600_do_cleanup_cp(dev); 2088 return -EINVAL; 2089 } 2090 } 2091 2092#if IS_ENABLED(CONFIG_AGP) 2093 /* XXX */ 2094 if (dev_priv->flags & RADEON_IS_AGP) { 2095 drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); 2096 drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); 2097 drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); 2098 if (!dev_priv->cp_ring->handle || 2099 !dev_priv->ring_rptr->handle || 2100 !dev->agp_buffer_map->handle) { 2101 DRM_ERROR("could not find ioremap agp regions!\n"); 2102 r600_do_cleanup_cp(dev); 2103 return -EINVAL; 2104 } 2105 } else 2106#endif 2107 { 2108 dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset; 2109 dev_priv->ring_rptr->handle = 2110 (void *)(unsigned long)dev_priv->ring_rptr->offset; 2111 dev->agp_buffer_map->handle = 2112 (void *)(unsigned long)dev->agp_buffer_map->offset; 2113 2114 DRM_DEBUG("dev_priv->cp_ring->handle %p\n", 2115 dev_priv->cp_ring->handle); 2116 DRM_DEBUG("dev_priv->ring_rptr->handle %p\n", 2117 dev_priv->ring_rptr->handle); 2118 DRM_DEBUG("dev->agp_buffer_map->handle %p\n", 2119 dev->agp_buffer_map->handle); 2120 } 2121 2122 dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24; 2123 dev_priv->fb_size = 2124 (((radeon_read_fb_location(dev_priv) & 0xffff0000u) << 8) + 0x1000000) 2125 - dev_priv->fb_location; 2126 2127 dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | 2128 ((dev_priv->front_offset 2129 + dev_priv->fb_location) >> 10)); 2130 2131 dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) | 2132 ((dev_priv->back_offset 2133 + dev_priv->fb_location) >> 10)); 2134 2135 dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) | 2136 ((dev_priv->depth_offset 2137 + dev_priv->fb_location) >> 10)); 2138 2139 dev_priv->gart_size = init->gart_size; 2140 2141 /* New let's set the memory map ... */ 2142 if (dev_priv->new_memmap) { 2143 u32 base = 0; 2144 2145 DRM_INFO("Setting GART location based on new memory map\n"); 2146 2147 /* If using AGP, try to locate the AGP aperture at the same 2148 * location in the card and on the bus, though we have to 2149 * align it down. 2150 */ 2151#if IS_ENABLED(CONFIG_AGP) 2152 /* XXX */ 2153 if (dev_priv->flags & RADEON_IS_AGP) { 2154 base = dev->agp->base; 2155 /* Check if valid */ 2156 if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location && 2157 base < (dev_priv->fb_location + dev_priv->fb_size - 1)) { 2158 DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n", 2159 dev->agp->base); 2160 base = 0; 2161 } 2162 } 2163#endif 2164 /* If not or if AGP is at 0 (Macs), try to put it elsewhere */ 2165 if (base == 0) { 2166 base = dev_priv->fb_location + dev_priv->fb_size; 2167 if (base < dev_priv->fb_location || 2168 ((base + dev_priv->gart_size) & 0xfffffffful) < base) 2169 base = dev_priv->fb_location 2170 - dev_priv->gart_size; 2171 } 2172 dev_priv->gart_vm_start = base & 0xffc00000u; 2173 if (dev_priv->gart_vm_start != base) 2174 DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", 2175 base, dev_priv->gart_vm_start); 2176 } 2177 2178#if IS_ENABLED(CONFIG_AGP) 2179 /* XXX */ 2180 if (dev_priv->flags & RADEON_IS_AGP) 2181 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset 2182 - dev->agp->base 2183 + dev_priv->gart_vm_start); 2184 else 2185#endif 2186 dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset 2187 - (unsigned long)dev->sg->virtual 2188 + dev_priv->gart_vm_start); 2189 2190 DRM_DEBUG("fb 0x%08x size %d\n", 2191 (unsigned int) dev_priv->fb_location, 2192 (unsigned int) dev_priv->fb_size); 2193 DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size); 2194 DRM_DEBUG("dev_priv->gart_vm_start 0x%08x\n", 2195 (unsigned int) dev_priv->gart_vm_start); 2196 DRM_DEBUG("dev_priv->gart_buffers_offset 0x%08lx\n", 2197 dev_priv->gart_buffers_offset); 2198 2199 dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle; 2200 dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle 2201 + init->ring_size / sizeof(u32)); 2202 dev_priv->ring.size = init->ring_size; 2203 dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8); 2204 2205 dev_priv->ring.rptr_update = /* init->rptr_update */ 4096; 2206 dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8); 2207 2208 dev_priv->ring.fetch_size = /* init->fetch_size */ 32; 2209 dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16); 2210 2211 dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1; 2212 2213 dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK; 2214 2215#if IS_ENABLED(CONFIG_AGP) 2216 if (dev_priv->flags & RADEON_IS_AGP) { 2217 /* XXX turn off pcie gart */ 2218 } else 2219#endif 2220 { 2221 dev_priv->gart_info.table_mask = DMA_BIT_MASK(32); 2222 /* if we have an offset set from userspace */ 2223 if (!dev_priv->pcigart_offset_set) { 2224 DRM_ERROR("Need gart offset from userspace\n"); 2225 r600_do_cleanup_cp(dev); 2226 return -EINVAL; 2227 } 2228 2229 DRM_DEBUG("Using gart offset 0x%08lx\n", dev_priv->pcigart_offset); 2230 2231 dev_priv->gart_info.bus_addr = 2232 dev_priv->pcigart_offset + dev_priv->fb_location; 2233 dev_priv->gart_info.mapping.offset = 2234 dev_priv->pcigart_offset + dev_priv->fb_aper_offset; 2235 dev_priv->gart_info.mapping.size = 2236 dev_priv->gart_info.table_size; 2237 2238 drm_legacy_ioremap_wc(&dev_priv->gart_info.mapping, dev); 2239 if (!dev_priv->gart_info.mapping.handle) { 2240 DRM_ERROR("ioremap failed.\n"); 2241 r600_do_cleanup_cp(dev); 2242 return -EINVAL; 2243 } 2244 2245 dev_priv->gart_info.addr = 2246 dev_priv->gart_info.mapping.handle; 2247 2248 DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n", 2249 dev_priv->gart_info.addr, 2250 dev_priv->pcigart_offset); 2251 2252 if (!r600_page_table_init(dev)) { 2253 DRM_ERROR("Failed to init GART table\n"); 2254 r600_do_cleanup_cp(dev); 2255 return -EINVAL; 2256 } 2257 2258 if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)) 2259 r700_vm_init(dev); 2260 else 2261 r600_vm_init(dev); 2262 } 2263 2264 if (!dev_priv->me_fw || !dev_priv->pfp_fw) { 2265 int err = r600_cp_init_microcode(dev_priv); 2266 if (err) { 2267 DRM_ERROR("Failed to load firmware!\n"); 2268 r600_do_cleanup_cp(dev); 2269 return err; 2270 } 2271 } 2272 if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)) 2273 r700_cp_load_microcode(dev_priv); 2274 else 2275 r600_cp_load_microcode(dev_priv); 2276 2277 r600_cp_init_ring_buffer(dev, dev_priv, file_priv); 2278 2279 dev_priv->last_buf = 0; 2280 2281 r600_do_engine_reset(dev); 2282 r600_test_writeback(dev_priv); 2283 2284 return 0; 2285} 2286 2287int r600_do_resume_cp(struct drm_device *dev, struct drm_file *file_priv) 2288{ 2289 drm_radeon_private_t *dev_priv = dev->dev_private; 2290 2291 DRM_DEBUG("\n"); 2292 if (((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770)) { 2293 r700_vm_init(dev); 2294 r700_cp_load_microcode(dev_priv); 2295 } else { 2296 r600_vm_init(dev); 2297 r600_cp_load_microcode(dev_priv); 2298 } 2299 r600_cp_init_ring_buffer(dev, dev_priv, file_priv); 2300 r600_do_engine_reset(dev); 2301 2302 return 0; 2303} 2304 2305/* Wait for the CP to go idle. 2306 */ 2307int r600_do_cp_idle(drm_radeon_private_t *dev_priv) 2308{ 2309 RING_LOCALS; 2310 DRM_DEBUG("\n"); 2311 2312 BEGIN_RING(5); 2313 OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0)); 2314 OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT); 2315 /* wait for 3D idle clean */ 2316 OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1)); 2317 OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2); 2318 OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN); 2319 2320 ADVANCE_RING(); 2321 COMMIT_RING(); 2322 2323 return r600_do_wait_for_idle(dev_priv); 2324} 2325 2326/* Start the Command Processor. 2327 */ 2328void r600_do_cp_start(drm_radeon_private_t *dev_priv) 2329{ 2330 u32 cp_me; 2331 RING_LOCALS; 2332 DRM_DEBUG("\n"); 2333 2334 BEGIN_RING(7); 2335 OUT_RING(CP_PACKET3(R600_IT_ME_INITIALIZE, 5)); 2336 OUT_RING(0x00000001); 2337 if (((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) 2338 OUT_RING(0x00000003); 2339 else 2340 OUT_RING(0x00000000); 2341 OUT_RING((dev_priv->r600_max_hw_contexts - 1)); 2342 OUT_RING(R600_ME_INITIALIZE_DEVICE_ID(1)); 2343 OUT_RING(0x00000000); 2344 OUT_RING(0x00000000); 2345 ADVANCE_RING(); 2346 COMMIT_RING(); 2347 2348 /* set the mux and reset the halt bit */ 2349 cp_me = 0xff; 2350 RADEON_WRITE(R600_CP_ME_CNTL, cp_me); 2351 2352 dev_priv->cp_running = 1; 2353 2354} 2355 2356void r600_do_cp_reset(drm_radeon_private_t *dev_priv) 2357{ 2358 u32 cur_read_ptr; 2359 DRM_DEBUG("\n"); 2360 2361 cur_read_ptr = RADEON_READ(R600_CP_RB_RPTR); 2362 RADEON_WRITE(R600_CP_RB_WPTR, cur_read_ptr); 2363 SET_RING_HEAD(dev_priv, cur_read_ptr); 2364 dev_priv->ring.tail = cur_read_ptr; 2365} 2366 2367void r600_do_cp_stop(drm_radeon_private_t *dev_priv) 2368{ 2369 uint32_t cp_me; 2370 2371 DRM_DEBUG("\n"); 2372 2373 cp_me = 0xff | R600_CP_ME_HALT; 2374 2375 RADEON_WRITE(R600_CP_ME_CNTL, cp_me); 2376 2377 dev_priv->cp_running = 0; 2378} 2379 2380int r600_cp_dispatch_indirect(struct drm_device *dev, 2381 struct drm_buf *buf, int start, int end) 2382{ 2383 drm_radeon_private_t *dev_priv = dev->dev_private; 2384 RING_LOCALS; 2385 2386 if (start != end) { 2387 unsigned long offset = (dev_priv->gart_buffers_offset 2388 + buf->offset + start); 2389 int dwords = (end - start + 3) / sizeof(u32); 2390 2391 DRM_DEBUG("dwords:%d\n", dwords); 2392 DRM_DEBUG("offset 0x%lx\n", offset); 2393 2394 2395 /* Indirect buffer data must be a multiple of 16 dwords. 2396 * pad the data with a Type-2 CP packet. 2397 */ 2398 while (dwords & 0xf) { 2399 u32 *data = (u32 *) 2400 ((char *)dev->agp_buffer_map->handle 2401 + buf->offset + start); 2402 data[dwords++] = RADEON_CP_PACKET2; 2403 } 2404 2405 /* Fire off the indirect buffer */ 2406 BEGIN_RING(4); 2407 OUT_RING(CP_PACKET3(R600_IT_INDIRECT_BUFFER, 2)); 2408 OUT_RING((offset & 0xfffffffc)); 2409 OUT_RING((upper_32_bits(offset) & 0xff)); 2410 OUT_RING(dwords); 2411 ADVANCE_RING(); 2412 } 2413 2414 return 0; 2415} 2416 2417void r600_cp_dispatch_swap(struct drm_device *dev, struct drm_file *file_priv) 2418{ 2419 drm_radeon_private_t *dev_priv = dev->dev_private; 2420 struct drm_master *master = file_priv->master; 2421 struct drm_radeon_master_private *master_priv = master->driver_priv; 2422 drm_radeon_sarea_t *sarea_priv = master_priv->sarea_priv; 2423 int nbox = sarea_priv->nbox; 2424 struct drm_clip_rect *pbox = sarea_priv->boxes; 2425 int i, cpp, src_pitch, dst_pitch; 2426 uint64_t src, dst; 2427 RING_LOCALS; 2428 DRM_DEBUG("\n"); 2429 2430 if (dev_priv->color_fmt == RADEON_COLOR_FORMAT_ARGB8888) 2431 cpp = 4; 2432 else 2433 cpp = 2; 2434 2435 if (sarea_priv->pfCurrentPage == 0) { 2436 src_pitch = dev_priv->back_pitch; 2437 dst_pitch = dev_priv->front_pitch; 2438 src = dev_priv->back_offset + dev_priv->fb_location; 2439 dst = dev_priv->front_offset + dev_priv->fb_location; 2440 } else { 2441 src_pitch = dev_priv->front_pitch; 2442 dst_pitch = dev_priv->back_pitch; 2443 src = dev_priv->front_offset + dev_priv->fb_location; 2444 dst = dev_priv->back_offset + dev_priv->fb_location; 2445 } 2446 2447 if (r600_prepare_blit_copy(dev, file_priv)) { 2448 DRM_ERROR("unable to allocate vertex buffer for swap buffer\n"); 2449 return; 2450 } 2451 for (i = 0; i < nbox; i++) { 2452 int x = pbox[i].x1; 2453 int y = pbox[i].y1; 2454 int w = pbox[i].x2 - x; 2455 int h = pbox[i].y2 - y; 2456 2457 DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h); 2458 2459 r600_blit_swap(dev, 2460 src, dst, 2461 x, y, x, y, w, h, 2462 src_pitch, dst_pitch, cpp); 2463 } 2464 r600_done_blit_copy(dev); 2465 2466 /* Increment the frame counter. The client-side 3D driver must 2467 * throttle the framerate by waiting for this value before 2468 * performing the swapbuffer ioctl. 2469 */ 2470 sarea_priv->last_frame++; 2471 2472 BEGIN_RING(3); 2473 R600_FRAME_AGE(sarea_priv->last_frame); 2474 ADVANCE_RING(); 2475} 2476 2477int r600_cp_dispatch_texture(struct drm_device *dev, 2478 struct drm_file *file_priv, 2479 drm_radeon_texture_t *tex, 2480 drm_radeon_tex_image_t *image) 2481{ 2482 drm_radeon_private_t *dev_priv = dev->dev_private; 2483 struct drm_buf *buf; 2484 u32 *buffer; 2485 const u8 __user *data; 2486 unsigned int size, pass_size; 2487 u64 src_offset, dst_offset; 2488 2489 if (!radeon_check_offset(dev_priv, tex->offset)) { 2490 DRM_ERROR("Invalid destination offset\n"); 2491 return -EINVAL; 2492 } 2493 2494 /* this might fail for zero-sized uploads - are those illegal? */ 2495 if (!radeon_check_offset(dev_priv, tex->offset + tex->height * tex->pitch - 1)) { 2496 DRM_ERROR("Invalid final destination offset\n"); 2497 return -EINVAL; 2498 } 2499 2500 size = tex->height * tex->pitch; 2501 2502 if (size == 0) 2503 return 0; 2504 2505 dst_offset = tex->offset; 2506 2507 if (r600_prepare_blit_copy(dev, file_priv)) { 2508 DRM_ERROR("unable to allocate vertex buffer for swap buffer\n"); 2509 return -EAGAIN; 2510 } 2511 do { 2512 data = (const u8 __user *)image->data; 2513 pass_size = size; 2514 2515 buf = radeon_freelist_get(dev); 2516 if (!buf) { 2517 DRM_DEBUG("EAGAIN\n"); 2518 if (copy_to_user(tex->image, image, sizeof(*image))) 2519 return -EFAULT; 2520 return -EAGAIN; 2521 } 2522 2523 if (pass_size > buf->total) 2524 pass_size = buf->total; 2525 2526 /* Dispatch the indirect buffer. 2527 */ 2528 buffer = 2529 (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); 2530 2531 if (copy_from_user(buffer, data, pass_size)) { 2532 DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size); 2533 return -EFAULT; 2534 } 2535 2536 buf->file_priv = file_priv; 2537 buf->used = pass_size; 2538 src_offset = dev_priv->gart_buffers_offset + buf->offset; 2539 2540 r600_blit_copy(dev, src_offset, dst_offset, pass_size); 2541 2542 radeon_cp_discard_buffer(dev, file_priv->master, buf); 2543 2544 /* Update the input parameters for next time */ 2545 image->data = (const u8 __user *)image->data + pass_size; 2546 dst_offset += pass_size; 2547 size -= pass_size; 2548 } while (size > 0); 2549 r600_done_blit_copy(dev); 2550 2551 return 0; 2552} 2553 2554/* 2555 * Legacy cs ioctl 2556 */ 2557static u32 radeon_cs_id_get(struct drm_radeon_private *radeon) 2558{ 2559 /* FIXME: check if wrap affect last reported wrap & sequence */ 2560 radeon->cs_id_scnt = (radeon->cs_id_scnt + 1) & 0x00FFFFFF; 2561 if (!radeon->cs_id_scnt) { 2562 /* increment wrap counter */ 2563 radeon->cs_id_wcnt += 0x01000000; 2564 /* valid sequence counter start at 1 */ 2565 radeon->cs_id_scnt = 1; 2566 } 2567 return (radeon->cs_id_scnt | radeon->cs_id_wcnt); 2568} 2569 2570static void r600_cs_id_emit(drm_radeon_private_t *dev_priv, u32 *id) 2571{ 2572 RING_LOCALS; 2573 2574 *id = radeon_cs_id_get(dev_priv); 2575 2576 /* SCRATCH 2 */ 2577 BEGIN_RING(3); 2578 R600_CLEAR_AGE(*id); 2579 ADVANCE_RING(); 2580 COMMIT_RING(); 2581} 2582 2583static int r600_ib_get(struct drm_device *dev, 2584 struct drm_file *fpriv, 2585 struct drm_buf **buffer) 2586{ 2587 struct drm_buf *buf; 2588 2589 *buffer = NULL; 2590 buf = radeon_freelist_get(dev); 2591 if (!buf) { 2592 return -EBUSY; 2593 } 2594 buf->file_priv = fpriv; 2595 *buffer = buf; 2596 return 0; 2597} 2598 2599static void r600_ib_free(struct drm_device *dev, struct drm_buf *buf, 2600 struct drm_file *fpriv, int l, int r) 2601{ 2602 drm_radeon_private_t *dev_priv = dev->dev_private; 2603 2604 if (buf) { 2605 if (!r) 2606 r600_cp_dispatch_indirect(dev, buf, 0, l * 4); 2607 radeon_cp_discard_buffer(dev, fpriv->master, buf); 2608 COMMIT_RING(); 2609 } 2610} 2611 2612int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fpriv) 2613{ 2614 struct drm_radeon_private *dev_priv = dev->dev_private; 2615 struct drm_radeon_cs *cs = data; 2616 struct drm_buf *buf; 2617 unsigned family; 2618 int l, r = 0; 2619 u32 *ib, cs_id = 0; 2620 2621 if (dev_priv == NULL) { 2622 DRM_ERROR("called with no initialization\n"); 2623 return -EINVAL; 2624 } 2625 family = dev_priv->flags & RADEON_FAMILY_MASK; 2626 if (family < CHIP_R600) { 2627 DRM_ERROR("cs ioctl valid only for R6XX & R7XX in legacy mode\n"); 2628 return -EINVAL; 2629 } 2630 mutex_lock(&dev_priv->cs_mutex); 2631 /* get ib */ 2632 r = r600_ib_get(dev, fpriv, &buf); 2633 if (r) { 2634 DRM_ERROR("ib_get failed\n"); 2635 goto out; 2636 } 2637 ib = dev->agp_buffer_map->handle + buf->offset; 2638 /* now parse command stream */ 2639 r = r600_cs_legacy(dev, data, fpriv, family, ib, &l); 2640 if (r) { 2641 goto out; 2642 } 2643 2644out: 2645 r600_ib_free(dev, buf, fpriv, l, r); 2646 /* emit cs id sequence */ 2647 r600_cs_id_emit(dev_priv, &cs_id); 2648 cs->cs_id = cs_id; 2649 mutex_unlock(&dev_priv->cs_mutex); 2650 return r; 2651} 2652 2653void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size) 2654{ 2655 struct drm_radeon_private *dev_priv = dev->dev_private; 2656 2657 *npipes = dev_priv->r600_npipes; 2658 *nbanks = dev_priv->r600_nbanks; 2659 *group_size = dev_priv->r600_group_size; 2660} 2661