root/drivers/gpu/drm/i915/i915_sysfs.c


DEFINITIONS

This source file includes the following definitions; a short userspace usage sketch follows the list.
  1. kdev_minor_to_i915
  2. calc_residency
  3. show_rc6_mask
  4. show_rc6_ms
  5. show_rc6p_ms
  6. show_rc6pp_ms
  7. show_media_rc6_ms
  8. l3_access_valid
  9. i915_l3_read
  10. i915_l3_write
  11. gt_act_freq_mhz_show
  12. gt_cur_freq_mhz_show
  13. gt_boost_freq_mhz_show
  14. gt_boost_freq_mhz_store
  15. vlv_rpe_freq_mhz_show
  16. gt_max_freq_mhz_show
  17. gt_max_freq_mhz_store
  18. gt_min_freq_mhz_show
  19. gt_min_freq_mhz_store
  20. gt_rp_mhz_show
  21. error_state_read
  22. error_state_write
  23. i915_setup_error_capture
  24. i915_teardown_error_capture
  25. i915_setup_error_capture
  26. i915_teardown_error_capture
  27. i915_setup_sysfs
  28. i915_teardown_sysfs
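
A short userspace sketch of how the resulting interface is consumed. The attributes are created on the DRM primary node's device directory (typically /sys/class/drm/card0/, with the RC6 residency files in its power/ subdirectory); the card0 path below is an assumption for illustration.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[32];
        ssize_t n;
        /* gt_cur_freq_mhz is one of the attributes registered by i915_setup_sysfs() */
        int fd = open("/sys/class/drm/card0/gt_cur_freq_mhz", O_RDONLY);

        if (fd < 0)
                return 1;

        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                printf("current GT frequency: %s", buf); /* value is in MHz */
        }
        close(fd);
        return 0;
}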

   1 /*
   2  * Copyright © 2012 Intel Corporation
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice (including the next
  12  * paragraph) shall be included in all copies or substantial portions of the
  13  * Software.
  14  *
  15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21  * IN THE SOFTWARE.
  22  *
  23  * Authors:
  24  *    Ben Widawsky <ben@bwidawsk.net>
  25  *
  26  */
  27 
  28 #include <linux/device.h>
  29 #include <linux/module.h>
  30 #include <linux/stat.h>
  31 #include <linux/sysfs.h>
  32 
  33 #include "i915_drv.h"
  34 #include "i915_sysfs.h"
  35 #include "intel_pm.h"
  36 #include "intel_sideband.h"
  37 
  38 static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
  39 {
  40         struct drm_minor *minor = dev_get_drvdata(kdev);
  41         return to_i915(minor->dev);
  42 }
  43 
  44 #ifdef CONFIG_PM
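      /*
       * Note: intel_rc6_residency_us() reports the accumulated residency in
       * microseconds; DIV_ROUND_CLOSEST_ULL(res, 1000) converts it to the
       * milliseconds exposed by the rc6*_residency_ms attributes below.
       */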
  45 static u32 calc_residency(struct drm_i915_private *dev_priv,
  46                           i915_reg_t reg)
  47 {
  48         intel_wakeref_t wakeref;
  49         u64 res = 0;
  50 
  51         with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
  52                 res = intel_rc6_residency_us(dev_priv, reg);
  53 
  54         return DIV_ROUND_CLOSEST_ULL(res, 1000);
  55 }
  56 
  57 static ssize_t
  58 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
  59 {
  60         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
  61         unsigned int mask;
  62 
  63         mask = 0;
  64         if (HAS_RC6(dev_priv))
  65                 mask |= BIT(0);
  66         if (HAS_RC6p(dev_priv))
  67                 mask |= BIT(1);
  68         if (HAS_RC6pp(dev_priv))
  69                 mask |= BIT(2);
  70 
  71         return snprintf(buf, PAGE_SIZE, "%x\n", mask);
  72 }
  73 
  74 static ssize_t
  75 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  76 {
  77         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
  78         u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);
  79         return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
  80 }
  81 
  82 static ssize_t
  83 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  84 {
  85         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
  86         u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);
  87         return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
  88 }
  89 
  90 static ssize_t
  91 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
  92 {
  93         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
  94         u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);
  95         return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
  96 }
  97 
  98 static ssize_t
  99 show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 100 {
 101         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 102         u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);
 103         return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 104 }
 105 
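      /*
       * The attributes below are all read-only (S_IRUGO).  Despite its name,
       * rc6_enable does not toggle anything: it reports a mask of the RC6
       * states supported by the platform (bit 0 = RC6, bit 1 = RC6p,
       * bit 2 = RC6pp).  Since the attribute groups use power_group_name,
       * the files appear under the device's power/ subdirectory in sysfs.
       */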
 106 static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
 107 static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
 108 static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
 109 static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
 110 static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);
 111 
 112 static struct attribute *rc6_attrs[] = {
 113         &dev_attr_rc6_enable.attr,
 114         &dev_attr_rc6_residency_ms.attr,
 115         NULL
 116 };
 117 
 118 static const struct attribute_group rc6_attr_group = {
 119         .name = power_group_name,
 120         .attrs =  rc6_attrs
 121 };
 122 
 123 static struct attribute *rc6p_attrs[] = {
 124         &dev_attr_rc6p_residency_ms.attr,
 125         &dev_attr_rc6pp_residency_ms.attr,
 126         NULL
 127 };
 128 
 129 static const struct attribute_group rc6p_attr_group = {
 130         .name = power_group_name,
 131         .attrs =  rc6p_attrs
 132 };
 133 
 134 static struct attribute *media_rc6_attrs[] = {
 135         &dev_attr_media_rc6_residency_ms.attr,
 136         NULL
 137 };
 138 
 139 static const struct attribute_group media_rc6_attr_group = {
 140         .name = power_group_name,
 141         .attrs =  media_rc6_attrs
 142 };
 143 #endif
 144 
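      /*
       * L3 dynamic parity (DPF): userspace can read and update the per-slice
       * L3 remap table through the l3_parity bin attributes defined below.
       * Accesses must be 4-byte aligned and lie within GEN7_L3LOG_SIZE, and
       * writes only take effect when a context is next switched in (see the
       * remap_slice handling in i915_l3_write()).
       */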
 145 static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
 146 {
 147         if (!HAS_L3_DPF(dev_priv))
 148                 return -EPERM;
 149 
 150         if (offset % 4 != 0)
 151                 return -EINVAL;
 152 
 153         if (offset >= GEN7_L3LOG_SIZE)
 154                 return -ENXIO;
 155 
 156         return 0;
 157 }
 158 
 159 static ssize_t
 160 i915_l3_read(struct file *filp, struct kobject *kobj,
 161              struct bin_attribute *attr, char *buf,
 162              loff_t offset, size_t count)
 163 {
 164         struct device *kdev = kobj_to_dev(kobj);
 165         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 166         struct drm_device *dev = &dev_priv->drm;
 167         int slice = (int)(uintptr_t)attr->private;
 168         int ret;
 169 
 170         count = round_down(count, 4);
 171 
 172         ret = l3_access_valid(dev_priv, offset);
 173         if (ret)
 174                 return ret;
 175 
 176         count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
 177 
 178         ret = i915_mutex_lock_interruptible(dev);
 179         if (ret)
 180                 return ret;
 181 
 182         if (dev_priv->l3_parity.remap_info[slice])
 183                 memcpy(buf,
 184                        dev_priv->l3_parity.remap_info[slice] + (offset/4),
 185                        count);
 186         else
 187                 memset(buf, 0, count);
 188 
 189         mutex_unlock(&dev->struct_mutex);
 190 
 191         return count;
 192 }
 193 
 194 static ssize_t
 195 i915_l3_write(struct file *filp, struct kobject *kobj,
 196               struct bin_attribute *attr, char *buf,
 197               loff_t offset, size_t count)
 198 {
 199         struct device *kdev = kobj_to_dev(kobj);
 200         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 201         struct drm_device *dev = &dev_priv->drm;
 202         struct i915_gem_context *ctx;
 203         int slice = (int)(uintptr_t)attr->private;
 204         u32 **remap_info;
 205         int ret;
 206 
 207         ret = l3_access_valid(dev_priv, offset);
 208         if (ret)
 209                 return ret;
 210 
 211         ret = i915_mutex_lock_interruptible(dev);
 212         if (ret)
 213                 return ret;
 214 
 215         remap_info = &dev_priv->l3_parity.remap_info[slice];
 216         if (!*remap_info) {
 217                 *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
 218                 if (!*remap_info) {
 219                         ret = -ENOMEM;
 220                         goto out;
 221                 }
 222         }
 223 
 224         /* TODO: Ideally we really want a GPU reset here to make sure errors
 225          * aren't propagated. Since I cannot find a stable way to reset the GPU
 226          * at this point it is left as a TODO.
 227         */
 228         memcpy(*remap_info + (offset/4), buf, count);
 229 
 230         /* NB: We defer the remapping until we switch to the context */
 231         list_for_each_entry(ctx, &dev_priv->contexts.list, link)
 232                 ctx->remap_slice |= (1<<slice);
 233 
 234         ret = count;
 235 
 236 out:
 237         mutex_unlock(&dev->struct_mutex);
 238 
 239         return ret;
 240 }
 241 
 242 static const struct bin_attribute dpf_attrs = {
 243         .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
 244         .size = GEN7_L3LOG_SIZE,
 245         .read = i915_l3_read,
 246         .write = i915_l3_write,
 247         .mmap = NULL,
 248         .private = (void *)0
 249 };
 250 
 251 static const struct bin_attribute dpf_attrs_1 = {
 252         .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
 253         .size = GEN7_L3LOG_SIZE,
 254         .read = i915_l3_read,
 255         .write = i915_l3_write,
 256         .mmap = NULL,
 257         .private = (void *)1
 258 };
 259 
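      /*
       * gt_act_freq_mhz reports the frequency the GT is actually running at,
       * read back from hardware, while gt_cur_freq_mhz below reports the
       * frequency most recently requested by the driver; the two can differ,
       * e.g. while the GPU is idle or being throttled.
       */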
 260 static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 261                                     struct device_attribute *attr, char *buf)
 262 {
 263         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 264         intel_wakeref_t wakeref;
 265         u32 freq;
 266 
 267         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 268 
 269         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 270                 vlv_punit_get(dev_priv);
 271                 freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 272                 vlv_punit_put(dev_priv);
 273 
 274                 freq = (freq >> 8) & 0xff;
 275         } else {
 276                 freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
 277         }
 278 
 279         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 280 
 281         return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
 282 }
 283 
 284 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 285                                     struct device_attribute *attr, char *buf)
 286 {
 287         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 288 
 289         return snprintf(buf, PAGE_SIZE, "%d\n",
 290                         intel_gpu_freq(dev_priv,
 291                                        dev_priv->gt_pm.rps.cur_freq));
 292 }
 293 
 294 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 295 {
 296         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 297 
 298         return snprintf(buf, PAGE_SIZE, "%d\n",
 299                         intel_gpu_freq(dev_priv,
 300                                        dev_priv->gt_pm.rps.boost_freq));
 301 }
 302 
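      /*
       * The boost frequency is requested on behalf of clients waiting for the
       * GPU (rps->num_waiters).  Values written here are converted from MHz
       * to hardware units via intel_freq_opcode() and rejected if they fall
       * outside the static hardware limits.
       */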
 303 static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 304                                        struct device_attribute *attr,
 305                                        const char *buf, size_t count)
 306 {
 307         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 308         struct intel_rps *rps = &dev_priv->gt_pm.rps;
 309         bool boost = false;
 310         ssize_t ret;
 311         u32 val;
 312 
 313         ret = kstrtou32(buf, 0, &val);
 314         if (ret)
 315                 return ret;
 316 
 317         /* Validate against (static) hardware limits */
 318         val = intel_freq_opcode(dev_priv, val);
 319         if (val < rps->min_freq || val > rps->max_freq)
 320                 return -EINVAL;
 321 
 322         mutex_lock(&rps->lock);
 323         if (val != rps->boost_freq) {
 324                 rps->boost_freq = val;
 325                 boost = atomic_read(&rps->num_waiters);
 326         }
 327         mutex_unlock(&rps->lock);
 328         if (boost)
 329                 schedule_work(&rps->work);
 330 
 331         return count;
 332 }
 333 
 334 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 335                                      struct device_attribute *attr, char *buf)
 336 {
 337         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 338 
 339         return snprintf(buf, PAGE_SIZE, "%d\n",
 340                         intel_gpu_freq(dev_priv,
 341                                        dev_priv->gt_pm.rps.efficient_freq));
 342 }
 343 
 344 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 345 {
 346         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 347 
 348         return snprintf(buf, PAGE_SIZE, "%d\n",
 349                         intel_gpu_freq(dev_priv,
 350                                        dev_priv->gt_pm.rps.max_freq_softlimit));
 351 }
 352 
 353 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 354                                      struct device_attribute *attr,
 355                                      const char *buf, size_t count)
 356 {
 357         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 358         struct intel_rps *rps = &dev_priv->gt_pm.rps;
 359         intel_wakeref_t wakeref;
 360         u32 val;
 361         ssize_t ret;
 362 
 363         ret = kstrtou32(buf, 0, &val);
 364         if (ret)
 365                 return ret;
 366 
 367         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 368         mutex_lock(&rps->lock);
 369 
 370         val = intel_freq_opcode(dev_priv, val);
 371         if (val < rps->min_freq ||
 372             val > rps->max_freq ||
 373             val < rps->min_freq_softlimit) {
 374                 ret = -EINVAL;
 375                 goto unlock;
 376         }
 377 
 378         if (val > rps->rp0_freq)
 379                 DRM_DEBUG("User requested overclocking to %d\n",
 380                           intel_gpu_freq(dev_priv, val));
 381 
 382         rps->max_freq_softlimit = val;
 383 
 384         val = clamp_t(int, rps->cur_freq,
 385                       rps->min_freq_softlimit,
 386                       rps->max_freq_softlimit);
 387 
 388         /* We still need *_set_rps to process the new max_delay and
 389          * update the interrupt limits and PMINTRMSK even though
 390          * frequency request may be unchanged. */
 391         ret = intel_set_rps(dev_priv, val);
 392 
 393 unlock:
 394         mutex_unlock(&rps->lock);
 395         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 396 
 397         return ret ?: count;
 398 }
 399 
 400 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 401 {
 402         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 403 
 404         return snprintf(buf, PAGE_SIZE, "%d\n",
 405                         intel_gpu_freq(dev_priv,
 406                                        dev_priv->gt_pm.rps.min_freq_softlimit));
 407 }
 408 
 409 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 410                                      struct device_attribute *attr,
 411                                      const char *buf, size_t count)
 412 {
 413         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 414         struct intel_rps *rps = &dev_priv->gt_pm.rps;
 415         intel_wakeref_t wakeref;
 416         u32 val;
 417         ssize_t ret;
 418 
 419         ret = kstrtou32(buf, 0, &val);
 420         if (ret)
 421                 return ret;
 422 
 423         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 424         mutex_lock(&rps->lock);
 425 
 426         val = intel_freq_opcode(dev_priv, val);
 427         if (val < rps->min_freq ||
 428             val > rps->max_freq ||
 429             val > rps->max_freq_softlimit) {
 430                 ret = -EINVAL;
 431                 goto unlock;
 432         }
 433 
 434         rps->min_freq_softlimit = val;
 435 
 436         val = clamp_t(int, rps->cur_freq,
 437                       rps->min_freq_softlimit,
 438                       rps->max_freq_softlimit);
 439 
 440         /* We still need *_set_rps to process the new min_delay and
 441          * update the interrupt limits and PMINTRMSK even though
 442          * frequency request may be unchanged. */
 443         ret = intel_set_rps(dev_priv, val);
 444 
 445 unlock:
 446         mutex_unlock(&rps->lock);
 447         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 448 
 449         return ret ?: count;
 450 }
 451 
 452 static DEVICE_ATTR_RO(gt_act_freq_mhz);
 453 static DEVICE_ATTR_RO(gt_cur_freq_mhz);
 454 static DEVICE_ATTR_RW(gt_boost_freq_mhz);
 455 static DEVICE_ATTR_RW(gt_max_freq_mhz);
 456 static DEVICE_ATTR_RW(gt_min_freq_mhz);
 457 
 458 static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);
 459 
 460 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
 461 static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 462 static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 463 static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 464 
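      /*
       * RP0 is the maximum non-overclocked frequency, RP1 the most
       * energy-efficient frequency and RPn the minimum frequency supported
       * by the hardware; all three are fixed per platform.
       */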
 465 /* For now we have a static number of RP states */
 466 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 467 {
 468         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 469         struct intel_rps *rps = &dev_priv->gt_pm.rps;
 470         u32 val;
 471 
 472         if (attr == &dev_attr_gt_RP0_freq_mhz)
 473                 val = intel_gpu_freq(dev_priv, rps->rp0_freq);
 474         else if (attr == &dev_attr_gt_RP1_freq_mhz)
 475                 val = intel_gpu_freq(dev_priv, rps->rp1_freq);
 476         else if (attr == &dev_attr_gt_RPn_freq_mhz)
 477                 val = intel_gpu_freq(dev_priv, rps->min_freq);
 478         else
 479                 BUG();
 480 
 481         return snprintf(buf, PAGE_SIZE, "%d\n", val);
 482 }
 483 
 484 static const struct attribute * const gen6_attrs[] = {
 485         &dev_attr_gt_act_freq_mhz.attr,
 486         &dev_attr_gt_cur_freq_mhz.attr,
 487         &dev_attr_gt_boost_freq_mhz.attr,
 488         &dev_attr_gt_max_freq_mhz.attr,
 489         &dev_attr_gt_min_freq_mhz.attr,
 490         &dev_attr_gt_RP0_freq_mhz.attr,
 491         &dev_attr_gt_RP1_freq_mhz.attr,
 492         &dev_attr_gt_RPn_freq_mhz.attr,
 493         NULL,
 494 };
 495 
 496 static const struct attribute * const vlv_attrs[] = {
 497         &dev_attr_gt_act_freq_mhz.attr,
 498         &dev_attr_gt_cur_freq_mhz.attr,
 499         &dev_attr_gt_boost_freq_mhz.attr,
 500         &dev_attr_gt_max_freq_mhz.attr,
 501         &dev_attr_gt_min_freq_mhz.attr,
 502         &dev_attr_gt_RP0_freq_mhz.attr,
 503         &dev_attr_gt_RP1_freq_mhz.attr,
 504         &dev_attr_gt_RPn_freq_mhz.attr,
 505         &dev_attr_vlv_rpe_freq_mhz.attr,
 506         NULL,
 507 };
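
A minimal sketch of capping the GT frequency from userspace through the gt_max_freq_mhz attribute above (assuming the primary node is card0; the helper name is illustrative, the write requires sufficient privileges and is validated by gt_max_freq_mhz_store()):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Write a new software cap in MHz (e.g. "600"); the value must lie within
 * the platform's supported range and is rounded to hardware granularity. */
static int set_gt_max_freq_mhz(const char *mhz)
{
        int fd = open("/sys/class/drm/card0/gt_max_freq_mhz", O_WRONLY);
        int ret = 0;

        if (fd < 0)
                return -1;
        if (write(fd, mhz, strlen(mhz)) < 0)
                ret = -1;
        close(fd);
        return ret;
}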
 508 
 509 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
 510 
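      /*
       * Reading "error" streams out the currently captured GPU error state
       * (or "No error state collected\n" when there is none); writing any
       * value to the file discards the captured state.
       */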
 511 static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 512                                 struct bin_attribute *attr, char *buf,
 513                                 loff_t off, size_t count)
 514 {
 515 
 516         struct device *kdev = kobj_to_dev(kobj);
 517         struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
 518         struct i915_gpu_state *gpu;
 519         ssize_t ret;
 520 
 521         gpu = i915_first_error_state(i915);
 522         if (IS_ERR(gpu)) {
 523                 ret = PTR_ERR(gpu);
 524         } else if (gpu) {
 525                 ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
 526                 i915_gpu_state_put(gpu);
 527         } else {
 528                 const char *str = "No error state collected\n";
 529                 size_t len = strlen(str);
 530 
 531                 ret = min_t(size_t, count, len - off);
 532                 memcpy(buf, str + off, ret);
 533         }
 534 
 535         return ret;
 536 }
 537 
 538 static ssize_t error_state_write(struct file *file, struct kobject *kobj,
 539                                  struct bin_attribute *attr, char *buf,
 540                                  loff_t off, size_t count)
 541 {
 542         struct device *kdev = kobj_to_dev(kobj);
 543         struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
 544 
 545         DRM_DEBUG_DRIVER("Resetting error state\n");
 546         i915_reset_error_state(dev_priv);
 547 
 548         return count;
 549 }
 550 
 551 static const struct bin_attribute error_state_attr = {
 552         .attr.name = "error",
 553         .attr.mode = S_IRUSR | S_IWUSR,
 554         .size = 0,
 555         .read = error_state_read,
 556         .write = error_state_write,
 557 };
 558 
 559 static void i915_setup_error_capture(struct device *kdev)
 560 {
 561         if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
 562                 DRM_ERROR("error_state sysfs setup failed\n");
 563 }
 564 
 565 static void i915_teardown_error_capture(struct device *kdev)
 566 {
 567         sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
 568 }
 569 #else
 570 static void i915_setup_error_capture(struct device *kdev) {}
 571 static void i915_teardown_error_capture(struct device *kdev) {}
 572 #endif
 573 
 574 void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 575 {
 576         struct device *kdev = dev_priv->drm.primary->kdev;
 577         int ret;
 578 
 579 #ifdef CONFIG_PM
 580         if (HAS_RC6(dev_priv)) {
 581                 ret = sysfs_merge_group(&kdev->kobj,
 582                                         &rc6_attr_group);
 583                 if (ret)
 584                         DRM_ERROR("RC6 residency sysfs setup failed\n");
 585         }
 586         if (HAS_RC6p(dev_priv)) {
 587                 ret = sysfs_merge_group(&kdev->kobj,
 588                                         &rc6p_attr_group);
 589                 if (ret)
 590                         DRM_ERROR("RC6p residency sysfs setup failed\n");
 591         }
 592         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
 593                 ret = sysfs_merge_group(&kdev->kobj,
 594                                         &media_rc6_attr_group);
 595                 if (ret)
 596                         DRM_ERROR("Media RC6 residency sysfs setup failed\n");
 597         }
 598 #endif
 599         if (HAS_L3_DPF(dev_priv)) {
 600                 ret = device_create_bin_file(kdev, &dpf_attrs);
 601                 if (ret)
 602                         DRM_ERROR("l3 parity sysfs setup failed\n");
 603 
 604                 if (NUM_L3_SLICES(dev_priv) > 1) {
 605                         ret = device_create_bin_file(kdev,
 606                                                      &dpf_attrs_1);
 607                         if (ret)
 608                                 DRM_ERROR("l3 parity slice 1 setup failed\n");
 609                 }
 610         }
 611 
 612         ret = 0;
 613         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 614                 ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
 615         else if (INTEL_GEN(dev_priv) >= 6)
 616                 ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
 617         if (ret)
 618                 DRM_ERROR("RPS sysfs setup failed\n");
 619 
 620         i915_setup_error_capture(kdev);
 621 }
 622 
 623 void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
 624 {
 625         struct device *kdev = dev_priv->drm.primary->kdev;
 626 
 627         i915_teardown_error_capture(kdev);
 628 
 629         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 630                 sysfs_remove_files(&kdev->kobj, vlv_attrs);
 631         else
 632                 sysfs_remove_files(&kdev->kobj, gen6_attrs);
 633         device_remove_bin_file(kdev,  &dpf_attrs_1);
 634         device_remove_bin_file(kdev,  &dpf_attrs);
 635 #ifdef CONFIG_PM
 636         sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
 637         sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
 638 #endif
 639 }
