drivers/gpu/drm/i915/selftests/intel_uncore.c


DEFINITIONS

This source file includes the following definitions:
  1. intel_fw_table_check
  2. intel_shadow_table_check
  3. intel_uncore_mock_selftests
  4. live_forcewake_ops
  5. live_forcewake_domains
  6. intel_uncore_live_selftests

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

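/*
 * Check that a forcewake range table is well-formed: entries must be in
 * strictly ascending order, each entry must cover at least one register
 * (start < end), and a "watertight" table must additionally leave no gap
 * between one entry's end and the next entry's start.
 */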
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
                                unsigned int num_ranges,
                                bool is_watertight)
{
        unsigned int i;
        s32 prev;

        for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
                /* Check that the table is watertight */
                if (is_watertight && (prev + 1) != (s32)ranges->start) {
                        pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
                               __func__, i, ranges->start, ranges->end, prev);
                        return -EINVAL;
                }

                /* Check that the table never goes backwards */
                if (prev >= (s32)ranges->start) {
                        pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
                               __func__, i, ranges->start, ranges->end, prev);
                        return -EINVAL;
                }

                /* Check that the entry is valid */
                if (ranges->start >= ranges->end) {
                        pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
                               __func__, i, ranges->start, ranges->end);
                        return -EINVAL;
                }

                prev = ranges->end;
        }

        return 0;
}

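/*
 * Check that each shadowed-register list is sorted in strictly ascending
 * offset order, with no duplicate offsets.
 */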
static int intel_shadow_table_check(void)
{
        struct {
                const i915_reg_t *regs;
                unsigned int size;
        } reg_lists[] = {
                { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
                { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
        };
        const i915_reg_t *reg;
        unsigned int i, j;
        s32 prev;

        for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) {
                reg = reg_lists[j].regs;
                for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) {
                        u32 offset = i915_mmio_reg_offset(*reg);

                        if (prev >= (s32)offset) {
                                pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
                                       __func__, i, offset, prev);
                                return -EINVAL;
                        }

                        prev = offset;
                }
        }

        return 0;
}

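/*
 * Mock selftests: validate the static forcewake range tables and the
 * shadowed-register lists without touching any hardware. The gen9/gen11
 * tables are expected to be watertight; the vlv/chv tables may have gaps.
 */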
int intel_uncore_mock_selftests(void)
{
        struct {
                const struct intel_forcewake_range *ranges;
                unsigned int num_ranges;
                bool is_watertight;
        } fw[] = {
                { __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
                { __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
                { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
                { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
        };
        int err, i;

        for (i = 0; i < ARRAY_SIZE(fw); i++) {
                err = intel_fw_table_check(fw[i].ranges,
                                           fw[i].num_ranges,
                                           fw[i].is_watertight);
                if (err)
                        return err;
        }

        err = intel_shadow_table_check();
        if (err)
                return err;

        return 0;
}

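/*
 * Live selftest: pick a register that is only readable while its forcewake
 * domain is held, and check that it reads back non-zero under forcewake but
 * returns zero once the domain has been released and its ack has cleared.
 */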
static int live_forcewake_ops(void *arg)
{
        static const struct reg {
                const char *name;
                unsigned long platforms;
                unsigned int offset;
        } registers[] = {
                {
                        "RING_START",
                        INTEL_GEN_MASK(6, 7),
                        0x38,
                },
                {
                        "RING_MI_MODE",
                        INTEL_GEN_MASK(8, BITS_PER_LONG),
                        0x9c,
                }
        };
        const struct reg *r;
        struct drm_i915_private *i915 = arg;
        struct intel_uncore_forcewake_domain *domain;
        struct intel_uncore *uncore = &i915->uncore;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        unsigned int tmp;
        int err = 0;

        GEM_BUG_ON(i915->gt.awake);

        /* vlv/chv with their pcu behave differently wrt reads */
        if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
                pr_debug("PCU fakes forcewake badly; skipping\n");
                return 0;
        }

        /*
         * Not quite as reliable across the gen as one would hope.
         *
         * Either our theory of operation is incorrect, or there remain
         * external parties interfering with the powerwells.
         *
         * https://bugs.freedesktop.org/show_bug.cgi?id=110210
         */
        if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
                return 0;

        /* We have to pick carefully to get the exact behaviour we need */
        for (r = registers; r->name; r++)
                if (r->platforms & INTEL_INFO(i915)->gen_mask)
                        break;
        if (!r->name) {
                pr_debug("Forcewaked register not known for %s; skipping\n",
                         intel_platform_name(INTEL_INFO(i915)->platform));
                return 0;
        }

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

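        /* Cancel any pending release timers so every domain starts idle */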
        for_each_fw_domain(domain, uncore, tmp) {
                smp_store_mb(domain->active, false);
                if (!hrtimer_cancel(&domain->timer))
                        continue;

                intel_uncore_fw_release_timer(&domain->timer);
        }

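        /* Read the chosen register on each engine, inside and outside fw */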
        for_each_engine(engine, i915, id) {
                i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
                u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
                enum forcewake_domains fw_domains;
                u32 val;

                if (!engine->default_state)
                        continue;

                fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
                                                            FW_REG_READ);
                if (!fw_domains)
                        continue;

                for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                        if (!domain->wake_count)
                                continue;

                        pr_err("fw_domain %s still active, aborting test!\n",
                               intel_uncore_forcewake_domain_to_str(domain->id));
                        err = -EINVAL;
                        goto out_rpm;
                }

                intel_uncore_forcewake_get(uncore, fw_domains);
                val = readl(reg);
                intel_uncore_forcewake_put(uncore, fw_domains);

                /* Flush the forcewake release (delayed onto a timer) */
                for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
                        smp_store_mb(domain->active, false);
                        if (hrtimer_cancel(&domain->timer))
                                intel_uncore_fw_release_timer(&domain->timer);

                        preempt_disable();
                        err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
                        preempt_enable();
                        if (err) {
                                pr_err("Failed to clear fw_domain %s\n",
                                       intel_uncore_forcewake_domain_to_str(domain->id));
                                goto out_rpm;
                        }
                }

                if (!val) {
                        pr_err("%s:%s was zero while fw was held!\n",
                               engine->name, r->name);
                        err = -EINVAL;
                        goto out_rpm;
                }

                /* We then expect the read to return 0 outside of the fw */
                if (wait_for(readl(reg) == 0, 100)) {
                        pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
                               engine->name, r->name, readl(reg), fw_domains);
                        err = -ETIMEDOUT;
                        goto out_rpm;
                }
        }

out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        return err;
}

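/*
 * Live selftest: walk the low mmio range, record which offsets can be read
 * without an unclaimed-access report while all forcewake domains are held,
 * and then confirm the same offsets still read cleanly through the normal
 * accessors after forcewake has been reset.
 */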
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
        struct drm_i915_private *dev_priv = arg;
        struct intel_uncore *uncore = &dev_priv->uncore;
        unsigned long *valid;
        u32 offset;
        int err;

        if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
            !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv))
                return 0;

        /*
         * This test may lockup the machine or cause GPU hangs afterwards.
         */
        if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
                return 0;

        valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
        if (!valid)
                return -ENOMEM;

        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

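        /* With every domain awake, note which offsets read without complaint */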
        check_for_unclaimed_mmio(uncore);
        for (offset = 0; offset < FW_RANGE; offset += 4) {
                i915_reg_t reg = { offset };

                (void)I915_READ_FW(reg);
                if (!check_for_unclaimed_mmio(uncore))
                        set_bit(offset, valid);
        }

        intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

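        /* Re-read each readable offset via the normal accessors after reset */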
        err = 0;
        for_each_set_bit(offset, valid, FW_RANGE) {
                i915_reg_t reg = { offset };

                iosf_mbi_punit_acquire();
                intel_uncore_forcewake_reset(uncore);
                iosf_mbi_punit_release();

                check_for_unclaimed_mmio(uncore);

                (void)I915_READ(reg);
                if (check_for_unclaimed_mmio(uncore)) {
                        pr_err("Unclaimed mmio read to register 0x%04x\n",
                               offset);
                        err = -EINVAL;
                }
        }

        bitmap_free(valid);
        return err;
}

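/*
 * Entry point for the live uncore selftests: re-validate the forcewake
 * table loaded for this device before exercising the hardware.
 */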
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_forcewake_ops),
                SUBTEST(live_forcewake_domains),
        };
        int err;

        /* Confirm the table we load is still valid */
        err = intel_fw_table_check(i915->uncore.fw_domains_table,
                                   i915->uncore.fw_domains_table_entries,
                                   INTEL_GEN(i915) >= 9);
        if (err)
                return err;

        return i915_subtests(tests, i915);
}
