drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c

DEFINITIONS

This source file includes the following definitions; a brief usage sketch follows the list.
  1. gv100_disp_wndw_cnt
  2. gv100_disp_super
  3. gv100_disp_exception
  4. gv100_disp_intr_ctrl_disp
  5. gv100_disp_intr_exc_other
  6. gv100_disp_intr_exc_winim
  7. gv100_disp_intr_exc_win
  8. gv100_disp_intr_head_timing
  9. gv100_disp_intr
  10. gv100_disp_fini
  11. gv100_disp_init
  12. gv100_disp_new

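For orientation, gv100_disp_new() at the bottom of the file is the constructor the rest of nvkm calls into; everything else is reached through the nv50_disp_func hook table it registers. A minimal sketch of how the constructor is typically referenced from the GV100 entry of the chipset table in engine/device/base.c (illustrative and heavily trimmed, not part of this file):

    static const struct nvkm_device_chip
    nv140_chipset = {
            .name = "GV100",
            /* ...other engine and subdev constructors elided... */
            .disp = gv100_disp_new, /* hook up this file's display engine */
    };
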
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv50.h"
#include "head.h"
#include "ior.h"
#include "channv50.h"
#include "rootnv50.h"

#include <core/gpuobj.h>
#include <subdev/timer.h>

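/* Report the number of window channels (bits 25:20 of 0x610074) and fill
 * *pmask with the bitmask of populated windows read from 0x610064.
 */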
int
gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
        struct nvkm_device *device = disp->engine.subdev.device;
        *pmask = nvkm_rd32(device, 0x610064);
        return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
}

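/* Supervisor work handler, queued from gv100_disp_intr_ctrl_disp().  Reads
 * the pending supervisor stage and the per-head status words, runs the
 * corresponding stage (1, 2 or 3) of the common nv50 supervisor sequence,
 * then acknowledges the request via 0x6107a8.
 */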
void
gv100_disp_super(struct work_struct *work)
{
        struct nv50_disp *disp =
                container_of(work, struct nv50_disp, supervisor);
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_head *head;
        u32 stat = nvkm_rd32(device, 0x6107a8);
        u32 mask[4];

        nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super), stat);
        list_for_each_entry(head, &disp->base.head, head) {
                mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
                HEAD_DBG(head, "%08x", mask[head->id]);
        }

        if (disp->super & 0x00000001) {
                nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
                nv50_disp_super_1(disp);
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_1_0(disp, head);
                }
        } else
        if (disp->super & 0x00000002) {
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_2_0(disp, head);
                }
                nvkm_outp_route(&disp->base);
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00010000))
                                continue;
                        nv50_disp_super_2_1(disp, head);
                }
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_2_2(disp, head);
                }
        } else
        if (disp->super & 0x00000004) {
                list_for_each_entry(head, &disp->base.head, head) {
                        if (!(mask[head->id] & 0x00001000))
                                continue;
                        nv50_disp_super_3_0(disp, head);
                }
        }

        list_for_each_entry(head, &disp->base.head, head)
                nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
        nvkm_wr32(device, 0x6107a8, 0x80000000);
}

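/* Decode and log an exception on channel 'chid'.  The status word at
 * 0x611020 + chid * 12 encodes the reason and failing method; the method
 * data and an extra code follow at +4 and +8.  Writing 0x90000000 back
 * appears to acknowledge the exception.
 */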
static void
gv100_disp_exception(struct nv50_disp *disp, int chid)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
        u32 type = (stat & 0x00007000) >> 12;
        u32 mthd = (stat & 0x00000fff) << 2;
        u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
        u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
        const struct nvkm_enum *reason =
                nvkm_enum_find(nv50_disp_intr_error_type, type);

        nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
                           "data %08x code %08x\n",
                   chid, stat, type, reason ? reason->name : "",
                   mthd, data, code);

        if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
                switch (mthd) {
                case 0x0200:
                        nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
                        break;
                default:
                        break;
                }
        }

        nvkm_wr32(device, 0x611020 + (chid * 12), 0x90000000);
}

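/* CTRL_DISP interrupt: supervisor requests (bits 2:0) are handed off to the
 * supervisor workqueue, and AWAKEN notifications (bit 8) are forwarded as
 * uevents to the core and window channels that raised them.
 */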
static void
gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611c30);

        if (stat & 0x00000007) {
                disp->super = (stat & 0x00000007);
                queue_work(disp->wq, &disp->supervisor);
                nvkm_wr32(device, 0x611860, disp->super);
                stat &= ~0x00000007;
        }

        /*TODO: I would guess this is VBIOS_RELEASE, however, NFI how to
         *      ACK it, nor does RM appear to bother.
         */
        if (stat & 0x00000008)
                stat &= ~0x00000008;

        if (stat & 0x00000100) {
                unsigned long wndws = nvkm_rd32(device, 0x611858);
                unsigned long other = nvkm_rd32(device, 0x61185c);
                int wndw;

                nvkm_wr32(device, 0x611858, wndws);
                nvkm_wr32(device, 0x61185c, other);

                /* AWAKEN_OTHER_CORE. */
                if (other & 0x00000001)
                        nv50_disp_chan_uevent_send(disp, 0);

                /* AWAKEN_WIN_CH(n). */
                for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
                        nv50_disp_chan_uevent_send(disp, 1 + wndw);
                }
        }

        if (stat)
                nvkm_warn(subdev, "ctrl %08x\n", stat);
}

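/* EXC_OTHER exceptions: bit 0 is the core channel (chid 0), bits 23:16 are
 * the cursor channels (chid 73 onwards).
 */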
static void
gv100_disp_intr_exc_other(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611854);
        unsigned long mask;
        int head;

        if (stat & 0x00000001) {
                nvkm_wr32(device, 0x611854, 0x00000001);
                gv100_disp_exception(disp, 0);
                stat &= ~0x00000001;
        }

        if ((mask = (stat & 0x00ff0000) >> 16)) {
                for_each_set_bit(head, &mask, disp->wndw.nr) {
                        nvkm_wr32(device, 0x611854, 0x00010000 << head);
                        gv100_disp_exception(disp, 73 + head);
                        stat &= ~(0x00010000 << head);
                }
        }

        if (stat) {
                nvkm_warn(subdev, "exception %08x\n", stat);
                nvkm_wr32(device, 0x611854, stat);
        }
}

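/* EXC_WINIM exceptions: one bit per window immediate channel (chid 33+). */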
static void
gv100_disp_intr_exc_winim(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        unsigned long stat = nvkm_rd32(device, 0x611850);
        int wndw;

        for_each_set_bit(wndw, &stat, disp->wndw.nr) {
                nvkm_wr32(device, 0x611850, BIT(wndw));
                gv100_disp_exception(disp, 33 + wndw);
                stat &= ~BIT(wndw);
        }

        if (stat) {
                nvkm_warn(subdev, "wimm %08x\n", (u32)stat);
                nvkm_wr32(device, 0x611850, stat);
        }
}

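/* EXC_WIN exceptions: one bit per window channel (chid 1+). */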
static void
gv100_disp_intr_exc_win(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        unsigned long stat = nvkm_rd32(device, 0x61184c);
        int wndw;

        for_each_set_bit(wndw, &stat, disp->wndw.nr) {
                nvkm_wr32(device, 0x61184c, BIT(wndw));
                gv100_disp_exception(disp, 1 + wndw);
                stat &= ~BIT(wndw);
        }

        if (stat) {
                nvkm_warn(subdev, "wndw %08x\n", (u32)stat);
                nvkm_wr32(device, 0x61184c, stat);
        }
}

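/* Per-head timing interrupts: bits 1:0 (LAST_DATA, LOADV) are simply
 * acknowledged, bit 2 signals vblank and is forwarded via nvkm_disp_vblank().
 */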
static void
gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));

        /* LAST_DATA, LOADV. */
        if (stat & 0x00000003) {
                nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
                stat &= ~0x00000003;
        }

        if (stat & 0x00000004) {
                nvkm_disp_vblank(&disp->base, head);
                nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
                stat &= ~0x00000004;
        }

        if (stat) {
                nvkm_warn(subdev, "head %08x\n", stat);
                nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
        }
}

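/* Top-level display interrupt handler.  0x611ec0 reports which sub-status
 * registers need servicing: per-head timing (bits 7:0), window,
 * window-immediate and "other" channel exceptions, and CTRL_DISP.
 */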
void
gv100_disp_intr(struct nv50_disp *disp)
{
        struct nvkm_subdev *subdev = &disp->base.engine.subdev;
        struct nvkm_device *device = subdev->device;
        u32 stat = nvkm_rd32(device, 0x611ec0);
        unsigned long mask;
        int head;

        if ((mask = (stat & 0x000000ff))) {
                for_each_set_bit(head, &mask, 8) {
                        gv100_disp_intr_head_timing(disp, head);
                        stat &= ~BIT(head);
                }
        }

        if (stat & 0x00000200) {
                gv100_disp_intr_exc_win(disp);
                stat &= ~0x00000200;
        }

        if (stat & 0x00000400) {
                gv100_disp_intr_exc_winim(disp);
                stat &= ~0x00000400;
        }

        if (stat & 0x00000800) {
                gv100_disp_intr_exc_other(disp);
                stat &= ~0x00000800;
        }

        if (stat & 0x00001000) {
                gv100_disp_intr_ctrl_disp(disp);
                stat &= ~0x00001000;
        }

        if (stat)
                nvkm_warn(subdev, "intr %08x\n", stat);
}

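/* Disable all CTRL_DISP interrupts on teardown. */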
void
gv100_disp_fini(struct nv50_disp *disp)
{
        struct nvkm_device *device = disp->base.engine.subdev.device;
        nvkm_wr32(device, 0x611db0, 0x00000000);
}

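/* One-time hardware setup: claim ownership of the display, mirror the
 * various capability registers into the 0x640xxx range, point the hardware
 * at the display instance memory, and unmask the interrupts this driver
 * services.
 */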
static int
gv100_disp_init(struct nv50_disp *disp)
{
        struct nvkm_device *device = disp->base.engine.subdev.device;
        struct nvkm_head *head;
        int i, j;
        u32 tmp;

        /* Claim ownership of display. */
        if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
                nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
                if (nvkm_msec(device, 2000,
                        if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
                                break;
                ) < 0)
                        return -EBUSY;
        }

        /* Lock pin capabilities. */
        tmp = nvkm_rd32(device, 0x610068);
        nvkm_wr32(device, 0x640008, tmp);

        /* SOR capabilities. */
        for (i = 0; i < disp->sor.nr; i++) {
                tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
                nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
                nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
        }

        /* Head capabilities. */
        list_for_each_entry(head, &disp->base.head, head) {
                const int id = head->id;

                /* RG. */
                tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
                nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);

                /* POSTCOMP. */
                for (j = 0; j < 6 * 4; j += 4) {
                        tmp = nvkm_rd32(device, 0x616100 + (id * 0x800) + j);
                        nvkm_wr32(device, 0x640030 + (id * 0x20) + j, tmp);
                }
        }

        /* Window capabilities. */
        for (i = 0; i < disp->wndw.nr; i++) {
                nvkm_mask(device, 0x640004, 1 << i, 1 << i);
                for (j = 0; j < 6 * 4; j += 4) {
                        tmp = nvkm_rd32(device, 0x630050 + (i * 0x800) + j);
                        nvkm_wr32(device, 0x6401e4 + (i * 0x20) + j, tmp);
                }
        }

        /* IHUB capabilities. */
        for (i = 0; i < 4; i++) {
                tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
                nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
        }

        nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);

        /* Setup instance memory. */
        switch (nvkm_memory_target(disp->inst->memory)) {
        case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
        case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
        case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
        default:
                break;
        }
        nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
        nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);

        /* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
        nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
        nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */

        /* EXC_OTHER: CURSn, CORE. */
        nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
                                    0x00000001); /* MSK. */
        nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */

        /* EXC_WINIM. */
        nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
        nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */

        /* EXC_WIN. */
        nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
        nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */

        /* HEAD_TIMING(n): VBLANK. */
        list_for_each_entry(head, &disp->base.head, head) {
                const u32 hoff = head->id * 4;
                nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
                nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
        }

        /* OR. */
        nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
        nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
        return 0;
}

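/* Hooks plugged into the common nv50 display implementation. */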
static const struct nv50_disp_func
gv100_disp = {
        .init = gv100_disp_init,
        .fini = gv100_disp_fini,
        .intr = gv100_disp_intr,
        .uevent = &gv100_disp_chan_uevent,
        .super = gv100_disp_super,
        .root = &gv100_disp_root_oclass,
        .wndw = { .cnt = gv100_disp_wndw_cnt },
        .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
        .sor = { .cnt = gv100_sor_cnt, .new = gv100_sor_new },
        .ramht_size = 0x2000,
};

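/* Engine constructor, referenced from the GV100 entry of the nvkm chipset
 * table (see the sketch after the definitions index above).
 */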
int
gv100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
{
        return nv50_disp_new_(&gv100_disp, device, index, pdisp);
}
