/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/** @file i915_gem_tiling.c
 *
 * Support for managing the tiling state of buffer objects.
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMs) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what address
 * swizzling it needs to do, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
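
/*
 * Rough illustration (not code the driver executes): with the common
 * bit6 ^= bit9 ^ bit10 swizzle, fixing up a CPU-visible offset would look
 * something like
 *
 *	addr ^= ((addr >> 3) ^ (addr >> 4)) & (1 << 6);
 *
 * since shifting right by 3 and 4 brings bits 9 and 10 down to bit
 * position 6.  The swizzle modes detected below differ only in which
 * upper bits (9, 10, 11 and/or 17) feed that XOR.
 */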

/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		if (dev_priv->preserve_bios_swizzle) {
			if (I915_READ(DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			uint32_t dimm_c0, dimm_c1;
			dimm_c0 = I915_READ(MAD_DIMM_C0);
			dimm_c1 = I915_READ(MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/* Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no cpu with gpu attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway. */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN5(dev)) {
		/* On Ironlake, whatever the DRAM config, the GPU always does
		 * the same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
		uint32_t dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC.  For single-channel, neither the CPU
		 * nor the GPU do swizzling.  For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled.  The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}

		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN4(dev)) {
			uint32_t ddc2 = I915_READ(DCC2);

			if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
				dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		}

		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR.  "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration.  It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *        slot fill        memory size   swizzling
		 * 0A   0B   1A   1B      1-ch   2-ch
		 * 512  0    0    0       512    0       O
		 * 512  0    512  0       16     1008    X
		 * 512  0    0    512     16     1008    X
		 * 0    512  0    512     16     1008    X
		 * 1024 1024 1024 0       2048   1024    O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 */
		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
		} else {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/* Check pitch constraints for all chips & tiling formats */
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
	int tile_width;

	/* Linear is always fine */
	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (IS_GEN2(dev) ||
	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
		tile_width = 128;
	else
		tile_width = 512;

	/* check maximum stride & object size */
	/* i965+ stores the end address of the gtt mapping in the fence
	 * reg, so don't bother to check the size */
	if (INTEL_INFO(dev)->gen >= 7) {
		if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
			return false;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
			return false;
	} else {
		if (stride > 8192)
			return false;

		if (IS_GEN3(dev)) {
			if (size > I830_FENCE_MAX_SIZE_VAL << 20)
				return false;
		} else {
			if (size > I830_FENCE_MAX_SIZE_VAL << 19)
				return false;
		}
	}

	if (stride < tile_width)
		return false;

	/* 965+ just needs multiples of tile width */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (stride & (tile_width - 1))
			return false;
		return true;
	}

	/* Pre-965 needs power of two tile widths */
	if (stride & (stride - 1))
		return false;

	return true;
}
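
/*
 * Worked example (illustrative only): a 1024 pixel wide, 32bpp X-tiled
 * surface with a 4096 byte stride passes i915_tiling_ok() everywhere --
 * the stride is at least the 512 byte X tile width, a multiple of it
 * (the gen4+ requirement), a power of two (the pre-965 requirement), and
 * well under the 8192 byte pre-965 maximum.
 */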

/* Is the current GTT allocation valid for the change in tiling? */
static bool
i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
	u32 size;

	if (tiling_mode == I915_TILING_NONE)
		return true;

	if (INTEL_INFO(obj->base.dev)->gen >= 4)
		return true;

	if (INTEL_INFO(obj->base.dev)->gen == 3) {
		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
			return false;
	} else {
		if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
			return false;
	}

	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
	if (i915_gem_obj_ggtt_size(obj) != size)
		return false;

	if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
		return false;

	return true;
}

/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_set_tiling *args = data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (!i915_tiling_ok(dev,
			    args->stride, obj->base.size, args->tiling_mode)) {
		drm_gem_object_unreference_unlocked(&obj->base);
		return -EINVAL;
	}

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
		ret = -EBUSY;
		goto err;
	}

	if (args->tiling_mode == I915_TILING_NONE) {
		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
		args->stride = 0;
	} else {
		if (args->tiling_mode == I915_TILING_X)
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
		else
			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;

		/* Hide bit 17 swizzling from the user.  This prevents old Mesa
		 * from aborting the application on sw fallbacks to bit 17,
		 * and we use the pread/pwrite bit17 paths to swizzle for it.
		 * If there was a user that was relying on the swizzle
		 * information for drm_intel_bo_map()ed reads/writes this would
		 * break it, but we don't have any of those.
		 */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
			args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;

		/* If we can't handle the swizzling, make it untiled. */
		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
			args->tiling_mode = I915_TILING_NONE;
			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
			args->stride = 0;
		}
	}

	if (args->tiling_mode != obj->tiling_mode ||
	    args->stride != obj->stride) {
		/* We need to rebind the object if its current allocation
		 * no longer meets the alignment restrictions for its new
		 * tiling mode. Otherwise we can just leave it alone, but
		 * need to ensure that any fence register is updated before
		 * the next fenced (either through the GTT or by the BLT unit
		 * on older GPUs) access.
		 *
		 * After updating the tiling parameters, we then flag whether
		 * we need to update an associated fence register. Note this
		 * has to also include the unfenced register the GPU uses
		 * whilst executing a fenced command for an untiled object.
386 */ 387 if (obj->map_and_fenceable && 388 !i915_gem_object_fence_ok(obj, args->tiling_mode)) 389 ret = i915_gem_object_ggtt_unbind(obj); 390 391 if (ret == 0) { 392 if (obj->pages && 393 obj->madv == I915_MADV_WILLNEED && 394 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { 395 if (args->tiling_mode == I915_TILING_NONE) 396 i915_gem_object_unpin_pages(obj); 397 if (obj->tiling_mode == I915_TILING_NONE) 398 i915_gem_object_pin_pages(obj); 399 } 400 401 obj->fence_dirty = 402 obj->last_fenced_req || 403 obj->fence_reg != I915_FENCE_REG_NONE; 404 405 obj->tiling_mode = args->tiling_mode; 406 obj->stride = args->stride; 407 408 /* Force the fence to be reacquired for GTT access */ 409 i915_gem_release_mmap(obj); 410 } 411 } 412 /* we have to maintain this existing ABI... */ 413 args->stride = obj->stride; 414 args->tiling_mode = obj->tiling_mode; 415 416 /* Try to preallocate memory required to save swizzling on put-pages */ 417 if (i915_gem_object_needs_bit17_swizzle(obj)) { 418 if (obj->bit_17 == NULL) { 419 obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT), 420 sizeof(long), GFP_KERNEL); 421 } 422 } else { 423 kfree(obj->bit_17); 424 obj->bit_17 = NULL; 425 } 426 427err: 428 drm_gem_object_unreference(&obj->base); 429 mutex_unlock(&dev->struct_mutex); 430 431 return ret; 432} 433 434/** 435 * Returns the current tiling mode and required bit 6 swizzling for the object. 436 */ 437int 438i915_gem_get_tiling(struct drm_device *dev, void *data, 439 struct drm_file *file) 440{ 441 struct drm_i915_gem_get_tiling *args = data; 442 struct drm_i915_private *dev_priv = dev->dev_private; 443 struct drm_i915_gem_object *obj; 444 445 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); 446 if (&obj->base == NULL) 447 return -ENOENT; 448 449 mutex_lock(&dev->struct_mutex); 450 451 args->tiling_mode = obj->tiling_mode; 452 switch (obj->tiling_mode) { 453 case I915_TILING_X: 454 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 455 break; 456 case I915_TILING_Y: 457 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; 458 break; 459 case I915_TILING_NONE: 460 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 461 break; 462 default: 463 DRM_ERROR("unknown tiling mode\n"); 464 } 465 466 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ 467 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) 468 args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN; 469 else 470 args->phys_swizzle_mode = args->swizzle_mode; 471 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) 472 args->swizzle_mode = I915_BIT_6_SWIZZLE_9; 473 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 474 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; 475 476 drm_gem_object_unreference(&obj->base); 477 mutex_unlock(&dev->struct_mutex); 478 479 return 0; 480} 481 482/** 483 * Swap every 64 bytes of this page around, to account for it having a new 484 * bit 17 of its physical address and therefore being interpreted differently 485 * by the GPU. 
486 */ 487static void 488i915_gem_swizzle_page(struct page *page) 489{ 490 char temp[64]; 491 char *vaddr; 492 int i; 493 494 vaddr = kmap(page); 495 496 for (i = 0; i < PAGE_SIZE; i += 128) { 497 memcpy(temp, &vaddr[i], 64); 498 memcpy(&vaddr[i], &vaddr[i + 64], 64); 499 memcpy(&vaddr[i + 64], temp, 64); 500 } 501 502 kunmap(page); 503} 504 505void 506i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 507{ 508 struct sg_page_iter sg_iter; 509 int i; 510 511 if (obj->bit_17 == NULL) 512 return; 513 514 i = 0; 515 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 516 struct page *page = sg_page_iter_page(&sg_iter); 517 char new_bit_17 = page_to_phys(page) >> 17; 518 if ((new_bit_17 & 0x1) != 519 (test_bit(i, obj->bit_17) != 0)) { 520 i915_gem_swizzle_page(page); 521 set_page_dirty(page); 522 } 523 i++; 524 } 525} 526 527void 528i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 529{ 530 struct sg_page_iter sg_iter; 531 int page_count = obj->base.size >> PAGE_SHIFT; 532 int i; 533 534 if (obj->bit_17 == NULL) { 535 obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count), 536 sizeof(long), GFP_KERNEL); 537 if (obj->bit_17 == NULL) { 538 DRM_ERROR("Failed to allocate memory for bit 17 " 539 "record\n"); 540 return; 541 } 542 } 543 544 i = 0; 545 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 546 if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) 547 __set_bit(i, obj->bit_17); 548 else 549 __clear_bit(i, obj->bit_17); 550 i++; 551 } 552} 553