arch/powerpc/platforms/cell/spu_base.c


DEFINITIONS

This source file includes the following definitions.
  1. spu_invalidate_slbs
  2. spu_flush_all_slbs
  3. mm_needs_global_tlbie
  4. spu_associate_mm
  5. spu_64k_pages_available
  6. spu_restart_dma
  7. spu_load_slb
  8. __spu_trap_data_seg
  9. __spu_trap_data_map
  10. __spu_kernel_slb
  11. __slb_present
  12. spu_setup_kernel_slbs
  13. spu_irq_class_0
  14. spu_irq_class_1
  15. spu_irq_class_2
  16. spu_request_irqs
  17. spu_free_irqs
  18. spu_init_channels
  19. spu_add_dev_attr
  20. spu_add_dev_attr_group
  21. spu_remove_dev_attr
  22. spu_remove_dev_attr_group
  23. spu_create_dev
  24. create_spu
  25. spu_acct_time
  26. spu_stat_show
  27. crash_kexec_stop_spus
  28. crash_register_spus
  29. crash_register_spus
  30. spu_shutdown
  31. init_spu_base

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * Low-level SPU handling
   4  *
   5  * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
   6  *
   7  * Author: Arnd Bergmann <arndb@de.ibm.com>
   8  */
   9 
  10 #undef DEBUG
  11 
  12 #include <linux/interrupt.h>
  13 #include <linux/list.h>
  14 #include <linux/init.h>
  15 #include <linux/ptrace.h>
  16 #include <linux/slab.h>
  17 #include <linux/wait.h>
  18 #include <linux/mm.h>
  19 #include <linux/io.h>
  20 #include <linux/mutex.h>
  21 #include <linux/linux_logo.h>
  22 #include <linux/syscore_ops.h>
  23 #include <asm/spu.h>
  24 #include <asm/spu_priv1.h>
  25 #include <asm/spu_csa.h>
  26 #include <asm/xmon.h>
  27 #include <asm/prom.h>
  28 #include <asm/kexec.h>
  29 
  30 const struct spu_management_ops *spu_management_ops;
  31 EXPORT_SYMBOL_GPL(spu_management_ops);
  32 
  33 const struct spu_priv1_ops *spu_priv1_ops;
  34 EXPORT_SYMBOL_GPL(spu_priv1_ops);
  35 
  36 struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
  37 EXPORT_SYMBOL_GPL(cbe_spu_info);
  38 
  39 /*
  40  * The spufs fault-handling code needs to call force_sig_fault to raise signals
  41  * on DMA errors. Export it here to avoid general kernel-wide access to this
  42  * function.
  43  */
  44 EXPORT_SYMBOL_GPL(force_sig_fault);
  45 
  46 /*
  47  * Protects cbe_spu_info and spu->number.
  48  */
  49 static DEFINE_SPINLOCK(spu_lock);
  50 
  51 /*
  52  * List of all spus in the system.
  53  *
  54  * This list is iterated by callers from irq context and callers that
  55  * want to sleep.  Thus modifications need to be done with both
  56  * spu_full_list_lock and spu_full_list_mutex held, while iterating
  57  * through it requires either of these locks.
  58  *
  59  * In addition spu_full_list_lock protects all assignments to
  60  * spu->mm.
  61  */
  62 static LIST_HEAD(spu_full_list);
  63 static DEFINE_SPINLOCK(spu_full_list_lock);
  64 static DEFINE_MUTEX(spu_full_list_mutex);
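/*
 * Illustrative sketch (editorial addition, mirroring create_spu() and
 * spu_flush_all_slbs() below): how the two-lock protocol above is used in
 * practice.  Modifying the list takes both the mutex and the spinlock;
 * an atomic-context iterator takes only the spinlock, a sleeping
 * iterator only the mutex.
 *
 *	// list modification
 *	mutex_lock(&spu_full_list_mutex);
 *	spin_lock_irqsave(&spu_full_list_lock, flags);
 *	list_add(&spu->full_list, &spu_full_list);
 *	spin_unlock_irqrestore(&spu_full_list_lock, flags);
 *	mutex_unlock(&spu_full_list_mutex);
 *
 *	// iteration from atomic context
 *	spin_lock_irqsave(&spu_full_list_lock, flags);
 *	list_for_each_entry(spu, &spu_full_list, full_list)
 *		do_something_atomic(spu);	// hypothetical helper
 *	spin_unlock_irqrestore(&spu_full_list_lock, flags);
 */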
  65 
  66 void spu_invalidate_slbs(struct spu *spu)
  67 {
  68         struct spu_priv2 __iomem *priv2 = spu->priv2;
  69         unsigned long flags;
  70 
  71         spin_lock_irqsave(&spu->register_lock, flags);
  72         if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
  73                 out_be64(&priv2->slb_invalidate_all_W, 0UL);
  74         spin_unlock_irqrestore(&spu->register_lock, flags);
  75 }
  76 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
  77 
  78 /* This is called by the MM core when a segment size is changed, to
  79  * request a flush of all the SPEs using a given mm
  80  */
  81 void spu_flush_all_slbs(struct mm_struct *mm)
  82 {
  83         struct spu *spu;
  84         unsigned long flags;
  85 
  86         spin_lock_irqsave(&spu_full_list_lock, flags);
  87         list_for_each_entry(spu, &spu_full_list, full_list) {
  88                 if (spu->mm == mm)
  89                         spu_invalidate_slbs(spu);
  90         }
  91         spin_unlock_irqrestore(&spu_full_list_lock, flags);
  92 }
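/*
 * Editorial note (an assumption about the wider tree, not verified here):
 * the MM core typically reaches this via the copro layer when a segment's
 * page size changes, roughly:
 *
 *	// arch/powerpc/mm/copro_fault.c (sketch)
 *	void copro_flush_all_slbs(struct mm_struct *mm)
 *	{
 *	#ifdef CONFIG_SPU_BASE
 *		spu_flush_all_slbs(mm);
 *	#endif
 *		cxl_slbia(mm);
 *	}
 *
 * so every SPE currently associated with the mm drops its stale SLBs.
 */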
  93 
  94 /* The hack below stinks... try to do something better one of
  95  * these days... Does it even work properly with NR_CPUS == 1 ?
  96  */
  97 static inline void mm_needs_global_tlbie(struct mm_struct *mm)
  98 {
  99         int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;
 100 
 101         /* Global TLBIE broadcast required with SPEs. */
 102         bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
 103 }
 104 
 105 void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
 106 {
 107         unsigned long flags;
 108 
 109         spin_lock_irqsave(&spu_full_list_lock, flags);
 110         spu->mm = mm;
 111         spin_unlock_irqrestore(&spu_full_list_lock, flags);
 112         if (mm)
 113                 mm_needs_global_tlbie(mm);
 114 }
 115 EXPORT_SYMBOL_GPL(spu_associate_mm);
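/*
 * Usage sketch (editorial addition): the spufs scheduler associates the
 * owning context's mm while a context is bound to an SPU and clears the
 * association on unbind.  The call sites live outside this file, so treat
 * this as an illustration rather than the verbatim caller.
 *
 *	spu_associate_mm(spu, ctx->owner);	// bind: SPE faults resolve
 *						// against the owner's mm
 *	...
 *	spu_associate_mm(spu, NULL);		// unbind: drop the association
 */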
 116 
 117 int spu_64k_pages_available(void)
 118 {
 119         return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
 120 }
 121 EXPORT_SYMBOL_GPL(spu_64k_pages_available);
 122 
 123 static void spu_restart_dma(struct spu *spu)
 124 {
 125         struct spu_priv2 __iomem *priv2 = spu->priv2;
 126 
 127         if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
 128                 out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
 129         else {
 130                 set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
 131                 mb();
 132         }
 133 }
 134 
 135 static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
 136 {
 137         struct spu_priv2 __iomem *priv2 = spu->priv2;
 138 
 139         pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
 140                         __func__, slbe, slb->vsid, slb->esid);
 141 
 142         out_be64(&priv2->slb_index_W, slbe);
 143         /* set invalid before writing vsid */
 144         out_be64(&priv2->slb_esid_RW, 0);
 145         /* now it's safe to write the vsid */
 146         out_be64(&priv2->slb_vsid_RW, slb->vsid);
 147         /* setting the new esid makes the entry valid again */
 148         out_be64(&priv2->slb_esid_RW, slb->esid);
 149 }
 150 
 151 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 152 {
 153         struct copro_slb slb;
 154         int ret;
 155 
 156         ret = copro_calculate_slb(spu->mm, ea, &slb);
 157         if (ret)
 158                 return ret;
 159 
 160         spu_load_slb(spu, spu->slb_replace, &slb);
 161 
 162         spu->slb_replace++;
 163         if (spu->slb_replace >= 8)
 164                 spu->slb_replace = 0;
 165 
 166         spu_restart_dma(spu);
 167         spu->stats.slb_flt++;
 168         return 0;
 169 }
 170 
 171 extern int hash_page(unsigned long ea, unsigned long access,
 172                      unsigned long trap, unsigned long dsisr); //XXX
 173 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 174 {
 175         int ret;
 176 
 177         pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);
 178 
 179         /*
 180          * Handle kernel space hash faults immediately. User hash
 181          * faults need to be deferred to process context.
 182          */
 183         if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
 184             (get_region_id(ea) != USER_REGION_ID)) {
 185 
 186                 spin_unlock(&spu->register_lock);
 187                 ret = hash_page(ea,
 188                                 _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
 189                                 0x300, dsisr);
 190                 spin_lock(&spu->register_lock);
 191 
 192                 if (!ret) {
 193                         spu_restart_dma(spu);
 194                         return 0;
 195                 }
 196         }
 197 
 198         spu->class_1_dar = ea;
 199         spu->class_1_dsisr = dsisr;
 200 
 201         spu->stop_callback(spu, 1);
 202 
 203         spu->class_1_dar = 0;
 204         spu->class_1_dsisr = 0;
 205 
 206         return 0;
 207 }
 208 
 209 static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
 210 {
 211         unsigned long ea = (unsigned long)addr;
 212         u64 llp;
 213 
 214         if (get_region_id(ea) == LINEAR_MAP_REGION_ID)
 215                 llp = mmu_psize_defs[mmu_linear_psize].sllp;
 216         else
 217                 llp = mmu_psize_defs[mmu_virtual_psize].sllp;
 218 
 219         slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
 220                 SLB_VSID_KERNEL | llp;
 221         slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
 222 }
 223 
 224 /**
 225  * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 226  * address @new_addr is present.
 227  */
 228 static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
 229                 void *new_addr)
 230 {
 231         unsigned long ea = (unsigned long)new_addr;
 232         int i;
 233 
 234         for (i = 0; i < nr_slbs; i++)
 235                 if (!((slbs[i].esid ^ ea) & ESID_MASK))
 236                         return 1;
 237 
 238         return 0;
 239 }
 240 
 241 /**
 242  * Set up the SPU kernel SLBs, in preparation for a context save/restore. We
 243  * need to map both the context save area, and the save/restore code.
 244  *
 245  * Because the lscsa and code may cross segment boundaries, we check to see
 246  * if mappings are required for the start and end of each range. We currently
 247  * assume that the mappings are smaller than one segment - if not, something
 248  * is seriously wrong.
 249  */
 250 void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 251                 void *code, int code_size)
 252 {
 253         struct copro_slb slbs[4];
 254         int i, nr_slbs = 0;
 255         /* start and end addresses of both mappings */
 256         void *addrs[] = {
 257                 lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
 258                 code, code + code_size - 1
 259         };
 260 
 261         /* check the set of addresses, and create a new entry in the slbs array
 262  * if there isn't already an SLB for that address */
 263         for (i = 0; i < ARRAY_SIZE(addrs); i++) {
 264                 if (__slb_present(slbs, nr_slbs, addrs[i]))
 265                         continue;
 266 
 267                 __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
 268                 nr_slbs++;
 269         }
 270 
 271         spin_lock_irq(&spu->register_lock);
 272         /* Add the set of SLBs */
 273         for (i = 0; i < nr_slbs; i++)
 274                 spu_load_slb(spu, i, &slbs[i]);
 275         spin_unlock_irq(&spu->register_lock);
 276 }
 277 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
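/*
 * Worked example (editorial addition, hypothetical addresses): with 256MB
 * segments ESID_MASK strips the low 28 bits, so an extra SLB entry is only
 * created when an object crosses a segment boundary.  Suppose:
 *
 *	lscsa start	0xc00000000fff0000  -> segment 0xc000000000000000 (new)
 *	lscsa end	0xc000000010031fff  -> segment 0xc000000010000000 (new)
 *	code start	0xc000000000e00000  -> segment 0xc000000000000000
 *					       (already present, skipped)
 *	code end	0xc000000000e07fff  -> skipped likewise
 *
 * __slb_present() suppresses the duplicates and nr_slbs ends up as 2, not 4.
 */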
 278 
 279 static irqreturn_t
 280 spu_irq_class_0(int irq, void *data)
 281 {
 282         struct spu *spu;
 283         unsigned long stat, mask;
 284 
 285         spu = data;
 286 
 287         spin_lock(&spu->register_lock);
 288         mask = spu_int_mask_get(spu, 0);
 289         stat = spu_int_stat_get(spu, 0) & mask;
 290 
 291         spu->class_0_pending |= stat;
 292         spu->class_0_dar = spu_mfc_dar_get(spu);
 293         spu->stop_callback(spu, 0);
 294         spu->class_0_pending = 0;
 295         spu->class_0_dar = 0;
 296 
 297         spu_int_stat_clear(spu, 0, stat);
 298         spin_unlock(&spu->register_lock);
 299 
 300         return IRQ_HANDLED;
 301 }
 302 
 303 static irqreturn_t
 304 spu_irq_class_1(int irq, void *data)
 305 {
 306         struct spu *spu;
 307         unsigned long stat, mask, dar, dsisr;
 308 
 309         spu = data;
 310 
 311         /* atomically read & clear class1 status. */
 312         spin_lock(&spu->register_lock);
 313         mask  = spu_int_mask_get(spu, 1);
 314         stat  = spu_int_stat_get(spu, 1) & mask;
 315         dar   = spu_mfc_dar_get(spu);
 316         dsisr = spu_mfc_dsisr_get(spu);
 317         if (stat & CLASS1_STORAGE_FAULT_INTR)
 318                 spu_mfc_dsisr_set(spu, 0ul);
 319         spu_int_stat_clear(spu, 1, stat);
 320 
 321         pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
 322                         dar, dsisr);
 323 
 324         if (stat & CLASS1_SEGMENT_FAULT_INTR)
 325                 __spu_trap_data_seg(spu, dar);
 326 
 327         if (stat & CLASS1_STORAGE_FAULT_INTR)
 328                 __spu_trap_data_map(spu, dar, dsisr);
 329 
 330         if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
 331                 ;
 332 
 333         if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
 334                 ;
 335 
 336         spu->class_1_dsisr = 0;
 337         spu->class_1_dar = 0;
 338 
 339         spin_unlock(&spu->register_lock);
 340 
 341         return stat ? IRQ_HANDLED : IRQ_NONE;
 342 }
 343 
 344 static irqreturn_t
 345 spu_irq_class_2(int irq, void *data)
 346 {
 347         struct spu *spu;
 348         unsigned long stat;
 349         unsigned long mask;
 350         const int mailbox_intrs =
 351                 CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;
 352 
 353         spu = data;
 354         spin_lock(&spu->register_lock);
 355         stat = spu_int_stat_get(spu, 2);
 356         mask = spu_int_mask_get(spu, 2);
 357         /* ignore interrupts we're not waiting for */
 358         stat &= mask;
 359         /* Mailbox interrupts are level-triggered; mask them now before
 360          * acknowledging */
 361         if (stat & mailbox_intrs)
 362                 spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
 363         /* acknowledge all interrupts before the callbacks */
 364         spu_int_stat_clear(spu, 2, stat);
 365 
 366         pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);
 367 
 368         if (stat & CLASS2_MAILBOX_INTR)
 369                 spu->ibox_callback(spu);
 370 
 371         if (stat & CLASS2_SPU_STOP_INTR)
 372                 spu->stop_callback(spu, 2);
 373 
 374         if (stat & CLASS2_SPU_HALT_INTR)
 375                 spu->stop_callback(spu, 2);
 376 
 377         if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
 378                 spu->mfc_callback(spu);
 379 
 380         if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
 381                 spu->wbox_callback(spu);
 382 
 383         spu->stats.class2_intr++;
 384 
 385         spin_unlock(&spu->register_lock);
 386 
 387         return stat ? IRQ_HANDLED : IRQ_NONE;
 388 }
 389 
 390 static int spu_request_irqs(struct spu *spu)
 391 {
 392         int ret = 0;
 393 
 394         if (spu->irqs[0]) {
 395                 snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
 396                          spu->number);
 397                 ret = request_irq(spu->irqs[0], spu_irq_class_0,
 398                                   0, spu->irq_c0, spu);
 399                 if (ret)
 400                         goto bail0;
 401         }
 402         if (spu->irqs[1]) {
 403                 snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
 404                          spu->number);
 405                 ret = request_irq(spu->irqs[1], spu_irq_class_1,
 406                                   0, spu->irq_c1, spu);
 407                 if (ret)
 408                         goto bail1;
 409         }
 410         if (spu->irqs[2]) {
 411                 snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
 412                          spu->number);
 413                 ret = request_irq(spu->irqs[2], spu_irq_class_2,
 414                                   0, spu->irq_c2, spu);
 415                 if (ret)
 416                         goto bail2;
 417         }
 418         return 0;
 419 
 420 bail2:
 421         if (spu->irqs[1])
 422                 free_irq(spu->irqs[1], spu);
 423 bail1:
 424         if (spu->irqs[0])
 425                 free_irq(spu->irqs[0], spu);
 426 bail0:
 427         return ret;
 428 }
 429 
 430 static void spu_free_irqs(struct spu *spu)
 431 {
 432         if (spu->irqs[0])
 433                 free_irq(spu->irqs[0], spu);
 434         if (spu->irqs[1])
 435                 free_irq(spu->irqs[1], spu);
 436         if (spu->irqs[2])
 437                 free_irq(spu->irqs[2], spu);
 438 }
 439 
 440 void spu_init_channels(struct spu *spu)
 441 {
 442         static const struct {
 443                  unsigned channel;
 444                  unsigned count;
 445         } zero_list[] = {
 446                 { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
 447                 { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
 448         }, count_list[] = {
 449                 { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
 450                 { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
 451                 { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
 452         };
 453         struct spu_priv2 __iomem *priv2;
 454         int i;
 455 
 456         priv2 = spu->priv2;
 457 
 458         /* initialize all channel data to zero */
 459         for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
 460                 int count;
 461 
 462                 out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
 463                 for (count = 0; count < zero_list[i].count; count++)
 464                         out_be64(&priv2->spu_chnldata_RW, 0);
 465         }
 466 
 467         /* initialize channel counts to meaningful values */
 468         for (i = 0; i < ARRAY_SIZE(count_list); i++) {
 469                 out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
 470                 out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
 471         }
 472 }
 473 EXPORT_SYMBOL_GPL(spu_init_channels);
 474 
 475 static struct bus_type spu_subsys = {
 476         .name = "spu",
 477         .dev_name = "spu",
 478 };
 479 
 480 int spu_add_dev_attr(struct device_attribute *attr)
 481 {
 482         struct spu *spu;
 483 
 484         mutex_lock(&spu_full_list_mutex);
 485         list_for_each_entry(spu, &spu_full_list, full_list)
 486                 device_create_file(&spu->dev, attr);
 487         mutex_unlock(&spu_full_list_mutex);
 488 
 489         return 0;
 490 }
 491 EXPORT_SYMBOL_GPL(spu_add_dev_attr);
 492 
 493 int spu_add_dev_attr_group(struct attribute_group *attrs)
 494 {
 495         struct spu *spu;
 496         int rc = 0;
 497 
 498         mutex_lock(&spu_full_list_mutex);
 499         list_for_each_entry(spu, &spu_full_list, full_list) {
 500                 rc = sysfs_create_group(&spu->dev.kobj, attrs);
 501 
 502                 /* we're in trouble here, but try unwinding anyway */
 503                 if (rc) {
 504                         printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
 505                                         __func__, attrs->name);
 506 
 507                         list_for_each_entry_continue_reverse(spu,
 508                                         &spu_full_list, full_list)
 509                                 sysfs_remove_group(&spu->dev.kobj, attrs);
 510                         break;
 511                 }
 512         }
 513 
 514         mutex_unlock(&spu_full_list_mutex);
 515 
 516         return rc;
 517 }
 518 EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);
 519 
 520 
 521 void spu_remove_dev_attr(struct device_attribute *attr)
 522 {
 523         struct spu *spu;
 524 
 525         mutex_lock(&spu_full_list_mutex);
 526         list_for_each_entry(spu, &spu_full_list, full_list)
 527                 device_remove_file(&spu->dev, attr);
 528         mutex_unlock(&spu_full_list_mutex);
 529 }
 530 EXPORT_SYMBOL_GPL(spu_remove_dev_attr);
 531 
 532 void spu_remove_dev_attr_group(struct attribute_group *attrs)
 533 {
 534         struct spu *spu;
 535 
 536         mutex_lock(&spu_full_list_mutex);
 537         list_for_each_entry(spu, &spu_full_list, full_list)
 538                 sysfs_remove_group(&spu->dev.kobj, attrs);
 539         mutex_unlock(&spu_full_list_mutex);
 540 }
 541 EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);
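/*
 * Usage sketch (editorial addition): publishing extra per-SPU sysfs
 * attributes through the helpers above.  The "example" attribute, its show
 * routine and the group name are hypothetical.
 *
 *	static ssize_t example_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		struct spu *spu = container_of(dev, struct spu, dev);
 *
 *		return sprintf(buf, "%d\n", spu->number);
 *	}
 *	static DEVICE_ATTR(example, 0444, example_show, NULL);
 *
 *	static struct attribute *example_attrs[] = {
 *		&dev_attr_example.attr,
 *		NULL,
 *	};
 *	static struct attribute_group example_group = {
 *		.name	= "example",
 *		.attrs	= example_attrs,
 *	};
 *
 *	ret = spu_add_dev_attr_group(&example_group);
 *	...
 *	spu_remove_dev_attr_group(&example_group);
 */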
 542 
 543 static int spu_create_dev(struct spu *spu)
 544 {
 545         int ret;
 546 
 547         spu->dev.id = spu->number;
 548         spu->dev.bus = &spu_subsys;
 549         ret = device_register(&spu->dev);
 550         if (ret) {
 551                 printk(KERN_ERR "Can't register SPU %d with sysfs\n",
 552                                 spu->number);
 553                 return ret;
 554         }
 555 
 556         sysfs_add_device_to_node(&spu->dev, spu->node);
 557 
 558         return 0;
 559 }
 560 
 561 static int __init create_spu(void *data)
 562 {
 563         struct spu *spu;
 564         int ret;
 565         static int number;
 566         unsigned long flags;
 567 
 568         ret = -ENOMEM;
 569         spu = kzalloc(sizeof (*spu), GFP_KERNEL);
 570         if (!spu)
 571                 goto out;
 572 
 573         spu->alloc_state = SPU_FREE;
 574 
 575         spin_lock_init(&spu->register_lock);
 576         spin_lock(&spu_lock);
 577         spu->number = number++;
 578         spin_unlock(&spu_lock);
 579 
 580         ret = spu_create_spu(spu, data);
 581 
 582         if (ret)
 583                 goto out_free;
 584 
 585         spu_mfc_sdr_setup(spu);
 586         spu_mfc_sr1_set(spu, 0x33);
 587         ret = spu_request_irqs(spu);
 588         if (ret)
 589                 goto out_destroy;
 590 
 591         ret = spu_create_dev(spu);
 592         if (ret)
 593                 goto out_free_irqs;
 594 
 595         mutex_lock(&cbe_spu_info[spu->node].list_mutex);
 596         list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
 597         cbe_spu_info[spu->node].n_spus++;
 598         mutex_unlock(&cbe_spu_info[spu->node].list_mutex);
 599 
 600         mutex_lock(&spu_full_list_mutex);
 601         spin_lock_irqsave(&spu_full_list_lock, flags);
 602         list_add(&spu->full_list, &spu_full_list);
 603         spin_unlock_irqrestore(&spu_full_list_lock, flags);
 604         mutex_unlock(&spu_full_list_mutex);
 605 
 606         spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
 607         spu->stats.tstamp = ktime_get_ns();
 608 
 609         INIT_LIST_HEAD(&spu->aff_list);
 610 
 611         goto out;
 612 
 613 out_free_irqs:
 614         spu_free_irqs(spu);
 615 out_destroy:
 616         spu_destroy_spu(spu);
 617 out_free:
 618         kfree(spu);
 619 out:
 620         return ret;
 621 }
 622 
 623 static const char *spu_state_names[] = {
 624         "user", "system", "iowait", "idle"
 625 };
 626 
 627 static unsigned long long spu_acct_time(struct spu *spu,
 628                 enum spu_utilization_state state)
 629 {
 630         unsigned long long time = spu->stats.times[state];
 631 
 632         /*
 633          * If the spu is idle or the context is stopped, utilization
 634          * statistics are not updated.  Apply the time delta from the
 635          * last recorded state of the spu.
 636          */
 637         if (spu->stats.util_state == state)
 638                 time += ktime_get_ns() - spu->stats.tstamp;
 639 
 640         return time / NSEC_PER_MSEC;
 641 }
 642 
 643 
 644 static ssize_t spu_stat_show(struct device *dev,
 645                                 struct device_attribute *attr, char *buf)
 646 {
 647         struct spu *spu = container_of(dev, struct spu, dev);
 648 
 649         return sprintf(buf, "%s %llu %llu %llu %llu "
 650                       "%llu %llu %llu %llu %llu %llu %llu %llu\n",
 651                 spu_state_names[spu->stats.util_state],
 652                 spu_acct_time(spu, SPU_UTIL_USER),
 653                 spu_acct_time(spu, SPU_UTIL_SYSTEM),
 654                 spu_acct_time(spu, SPU_UTIL_IOWAIT),
 655                 spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
 656                 spu->stats.vol_ctx_switch,
 657                 spu->stats.invol_ctx_switch,
 658                 spu->stats.slb_flt,
 659                 spu->stats.hash_flt,
 660                 spu->stats.min_flt,
 661                 spu->stats.maj_flt,
 662                 spu->stats.class2_intr,
 663                 spu->stats.libassist);
 664 }
 665 
 666 static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);
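/*
 * Editorial note (sample values are hypothetical): the attribute is added
 * for every SPU by init_spu_base() via spu_add_dev_attr(), and typically
 * shows up as /sys/devices/system/spu/spuN/stat.  Reading it yields one
 * line in the field order of spu_stat_show() above, e.g.:
 *
 *	user 0 1432 18 260517 42 7 128 3 96 11 2054 1
 *
 * i.e. util_state, the four accumulated times in milliseconds, then the
 * context-switch, fault, class 2 interrupt and libassist counters.
 */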
 667 
 668 #ifdef CONFIG_KEXEC_CORE
 669 
 670 struct crash_spu_info {
 671         struct spu *spu;
 672         u32 saved_spu_runcntl_RW;
 673         u32 saved_spu_status_R;
 674         u32 saved_spu_npc_RW;
 675         u64 saved_mfc_sr1_RW;
 676         u64 saved_mfc_dar;
 677         u64 saved_mfc_dsisr;
 678 };
 679 
 680 #define CRASH_NUM_SPUS  16      /* Enough for current hardware */
 681 static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
 682 
 683 static void crash_kexec_stop_spus(void)
 684 {
 685         struct spu *spu;
 686         int i;
 687         u64 tmp;
 688 
 689         for (i = 0; i < CRASH_NUM_SPUS; i++) {
 690                 if (!crash_spu_info[i].spu)
 691                         continue;
 692 
 693                 spu = crash_spu_info[i].spu;
 694 
 695                 crash_spu_info[i].saved_spu_runcntl_RW =
 696                         in_be32(&spu->problem->spu_runcntl_RW);
 697                 crash_spu_info[i].saved_spu_status_R =
 698                         in_be32(&spu->problem->spu_status_R);
 699                 crash_spu_info[i].saved_spu_npc_RW =
 700                         in_be32(&spu->problem->spu_npc_RW);
 701 
 702                 crash_spu_info[i].saved_mfc_dar    = spu_mfc_dar_get(spu);
 703                 crash_spu_info[i].saved_mfc_dsisr  = spu_mfc_dsisr_get(spu);
 704                 tmp = spu_mfc_sr1_get(spu);
 705                 crash_spu_info[i].saved_mfc_sr1_RW = tmp;
 706 
 707                 tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
 708                 spu_mfc_sr1_set(spu, tmp);
 709 
 710                 __delay(200);
 711         }
 712 }
 713 
 714 static void crash_register_spus(struct list_head *list)
 715 {
 716         struct spu *spu;
 717         int ret;
 718 
 719         list_for_each_entry(spu, list, full_list) {
 720                 if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
 721                         continue;
 722 
 723                 crash_spu_info[spu->number].spu = spu;
 724         }
 725 
 726         ret = crash_shutdown_register(&crash_kexec_stop_spus);
 727         if (ret)
 728                 printk(KERN_ERR "Could not register SPU crash handler\n");
 729 }
 730 
 731 #else
 732 static inline void crash_register_spus(struct list_head *list)
 733 {
 734 }
 735 #endif
 736 
 737 static void spu_shutdown(void)
 738 {
 739         struct spu *spu;
 740 
 741         mutex_lock(&spu_full_list_mutex);
 742         list_for_each_entry(spu, &spu_full_list, full_list) {
 743                 spu_free_irqs(spu);
 744                 spu_destroy_spu(spu);
 745         }
 746         mutex_unlock(&spu_full_list_mutex);
 747 }
 748 
 749 static struct syscore_ops spu_syscore_ops = {
 750         .shutdown = spu_shutdown,
 751 };
 752 
 753 static int __init init_spu_base(void)
 754 {
 755         int i, ret = 0;
 756 
 757         for (i = 0; i < MAX_NUMNODES; i++) {
 758                 mutex_init(&cbe_spu_info[i].list_mutex);
 759                 INIT_LIST_HEAD(&cbe_spu_info[i].spus);
 760         }
 761 
 762         if (!spu_management_ops)
 763                 goto out;
 764 
 765         /* create system subsystem for spus */
 766         ret = subsys_system_register(&spu_subsys, NULL);
 767         if (ret)
 768                 goto out;
 769 
 770         ret = spu_enumerate_spus(create_spu);
 771 
 772         if (ret < 0) {
 773                 printk(KERN_WARNING "%s: Error initializing spus\n",
 774                         __func__);
 775                 goto out_unregister_subsys;
 776         }
 777 
 778         if (ret > 0)
 779                 fb_append_extra_logo(&logo_spe_clut224, ret);
 780 
 781         mutex_lock(&spu_full_list_mutex);
 782         xmon_register_spus(&spu_full_list);
 783         crash_register_spus(&spu_full_list);
 784         mutex_unlock(&spu_full_list_mutex);
 785         spu_add_dev_attr(&dev_attr_stat);
 786         register_syscore_ops(&spu_syscore_ops);
 787 
 788         spu_init_affinity();
 789 
 790         return 0;
 791 
 792  out_unregister_subsys:
 793         bus_unregister(&spu_subsys);
 794  out:
 795         return ret;
 796 }
 797 device_initcall(init_spu_base);
