arch/mips/bcm63xx/irq.c


DEFINITIONS

This source file includes the following definitions:
  1. get_ext_irq_perf_reg
  2. handle_internal
  3. enable_irq_for_cpu
  4. plat_irq_dispatch
  5. bcm63xx_internal_irq_mask
  6. bcm63xx_internal_irq_unmask
  7. bcm63xx_external_irq_mask
  8. bcm63xx_external_irq_unmask
  9. bcm63xx_external_irq_clear
  10. bcm63xx_external_irq_set_type
  11. bcm63xx_internal_set_affinity
  12. bcm63xx_init_irq
  13. arch_init_irq

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>

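/*
 * ipic_lock/epic_lock serialize access to the internal and external
 * controller registers.  The per-CPU stat/mask register addresses and
 * the dispatch/mask/unmask helpers below are filled in by
 * bcm63xx_init_irq() once the SoC type is known.
 */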
static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);


static inline u32 get_ext_irq_perf_reg(int irq)
{
        if (irq < 4)
                return ext_irq_cfg_reg1;
        return ext_irq_cfg_reg2;
}

static inline void handle_internal(int intbit)
{
        if (is_ext_irq_cascaded &&
            intbit >= ext_irq_start && intbit <= ext_irq_end)
                do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
        else
                do_IRQ(intbit + IRQ_INTERNAL_BASE);
}

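/*
 * Decide whether an internal IRQ should be unmasked on @cpu: the CPU
 * must be online and, on SMP, must be part of the explicit mask @m (if
 * one was given) or of the affinity mask previously set on @d.
 */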
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
                                     const struct cpumask *m)
{
        bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
        if (m)
                enable &= cpumask_test_cpu(cpu, m);
        else if (irqd_affinity_was_set(d))
                enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
        return enable;
}

/*
 * dispatch internal device IRQs (uart, enet, watchdog, ...). Do not
 * prioritize any interrupt relative to another; the static counter
 * resumes the loop where it ended the last time we left this
 * function.
 */

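/*
 * BUILD_IPIC_INTERNAL() expands to the dispatch/mask/unmask helpers for
 * a 32- or 64-bit wide internal controller.  The per-CPU static index
 * makes dispatch resume after the last serviced source, and only one
 * pending source is handled per invocation; the "& (width - 1)" wrap
 * relies on width being a power of two.
 */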
#define BUILD_IPIC_INTERNAL(width) \
void __dispatch_internal_##width(int cpu) \
{ \
        u32 pending[width / 32]; \
        unsigned int src, tgt; \
        bool irqs_pending = false; \
        static unsigned int i[2]; \
        unsigned int *next = &i[cpu]; \
        unsigned long flags; \
\
        /* read registers in reverse order */ \
        spin_lock_irqsave(&ipic_lock, flags); \
        for (src = 0, tgt = (width / 32); src < (width / 32); src++) { \
                u32 val; \
\
                val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
                val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
                pending[--tgt] = val; \
\
                if (val) \
                        irqs_pending = true; \
        } \
        spin_unlock_irqrestore(&ipic_lock, flags); \
\
        if (!irqs_pending) \
                return; \
\
        while (1) { \
                unsigned int to_call = *next; \
\
                *next = (*next + 1) & (width - 1); \
                if (pending[to_call / 32] & (1 << (to_call & 0x1f))) { \
                        handle_internal(to_call); \
                        break; \
                } \
        } \
} \
\
static void __internal_irq_mask_##width(struct irq_data *d) \
{ \
        u32 val; \
        unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
        unsigned reg = (irq / 32) ^ (width/32 - 1); \
        unsigned bit = irq & 0x1f; \
        unsigned long flags; \
        int cpu; \
\
        spin_lock_irqsave(&ipic_lock, flags); \
        for_each_present_cpu(cpu) { \
                if (!irq_mask_addr[cpu]) \
                        break; \
\
                val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32)); \
                val &= ~(1 << bit); \
                bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32)); \
        } \
        spin_unlock_irqrestore(&ipic_lock, flags); \
} \
\
static void __internal_irq_unmask_##width(struct irq_data *d, \
                                          const struct cpumask *m) \
{ \
        u32 val; \
        unsigned irq = d->irq - IRQ_INTERNAL_BASE; \
        unsigned reg = (irq / 32) ^ (width/32 - 1); \
        unsigned bit = irq & 0x1f; \
        unsigned long flags; \
        int cpu; \
\
        spin_lock_irqsave(&ipic_lock, flags); \
        for_each_present_cpu(cpu) { \
                if (!irq_mask_addr[cpu]) \
                        break; \
\
                val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32)); \
                if (enable_irq_for_cpu(cpu, d, m)) \
                        val |= (1 << bit); \
                else \
                        val &= ~(1 << bit); \
                bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32)); \
        } \
        spin_unlock_irqrestore(&ipic_lock, flags); \
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);

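/*
 * CP0 cause decoding: IP7 (normally the CP0 timer) and the software
 * interrupts IP0/IP1 go straight to do_IRQ().  IP2 is the internal
 * controller of CPU 0.  When the external lines are cascaded through
 * the internal controller, IP3 carries the second CPU's internal
 * controller; otherwise IP3..IP6 are the four external interrupt lines.
 */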
asmlinkage void plat_irq_dispatch(void)
{
        u32 cause;

        do {
                cause = read_c0_cause() & read_c0_status() & ST0_IM;

                if (!cause)
                        break;

                if (cause & CAUSEF_IP7)
                        do_IRQ(7);
                if (cause & CAUSEF_IP0)
                        do_IRQ(0);
                if (cause & CAUSEF_IP1)
                        do_IRQ(1);
                if (cause & CAUSEF_IP2)
                        dispatch_internal(0);
                if (is_ext_irq_cascaded) {
                        if (cause & CAUSEF_IP3)
                                dispatch_internal(1);
                } else {
                        if (cause & CAUSEF_IP3)
                                do_IRQ(IRQ_EXT_0);
                        if (cause & CAUSEF_IP4)
                                do_IRQ(IRQ_EXT_1);
                        if (cause & CAUSEF_IP5)
                                do_IRQ(IRQ_EXT_2);
                        if (cause & CAUSEF_IP6)
                                do_IRQ(IRQ_EXT_3);
                }
        } while (1);
}

/*
 * internal IRQ operations: only mask/unmask on the PERF irq mask
 * register.
 */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
        internal_irq_mask(d);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
        internal_irq_unmask(d, NULL);
}

/*
 * external IRQ operations: mask/unmask and clear on the PERF external
 * irq control register.
 */
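/*
 * On SoCs where the external lines are cascaded through the internal
 * controller, masking or unmasking an external IRQ also updates the
 * corresponding internal source.
 */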
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
        unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
        u32 reg, regaddr;
        unsigned long flags;

        regaddr = get_ext_irq_perf_reg(irq);
        spin_lock_irqsave(&epic_lock, flags);
        reg = bcm_perf_readl(regaddr);

        if (BCMCPU_IS_6348())
                reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
        else
                reg &= ~EXTIRQ_CFG_MASK(irq % 4);

        bcm_perf_writel(reg, regaddr);
        spin_unlock_irqrestore(&epic_lock, flags);

        if (is_ext_irq_cascaded)
                internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
        unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
        u32 reg, regaddr;
        unsigned long flags;

        regaddr = get_ext_irq_perf_reg(irq);
        spin_lock_irqsave(&epic_lock, flags);
        reg = bcm_perf_readl(regaddr);

        if (BCMCPU_IS_6348())
                reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
        else
                reg |= EXTIRQ_CFG_MASK(irq % 4);

        bcm_perf_writel(reg, regaddr);
        spin_unlock_irqrestore(&epic_lock, flags);

        if (is_ext_irq_cascaded)
                internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
                                    NULL);
}

static void bcm63xx_external_irq_clear(struct irq_data *d)
{
        unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
        u32 reg, regaddr;
        unsigned long flags;

        regaddr = get_ext_irq_perf_reg(irq);
        spin_lock_irqsave(&epic_lock, flags);
        reg = bcm_perf_readl(regaddr);

        if (BCMCPU_IS_6348())
                reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
        else
                reg |= EXTIRQ_CFG_CLEAR(irq % 4);

        bcm_perf_writel(reg, regaddr);
        spin_unlock_irqrestore(&epic_lock, flags);
}

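/*
 * Trigger type encoding in the EXTIRQ_CFG register (IRQ_TYPE_NONE falls
 * back to level low):
 *   falling edge: levelsense=0, sense=0, bothedge=0
 *   rising edge:  levelsense=0, sense=1
 *   both edges:   bothedge=1
 *   level low:    levelsense=1, sense=0
 *   level high:   levelsense=1, sense=1
 */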
static int bcm63xx_external_irq_set_type(struct irq_data *d,
                                         unsigned int flow_type)
{
        unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
        u32 reg, regaddr;
        int levelsense, sense, bothedge;
        unsigned long flags;

        flow_type &= IRQ_TYPE_SENSE_MASK;

        if (flow_type == IRQ_TYPE_NONE)
                flow_type = IRQ_TYPE_LEVEL_LOW;

        levelsense = sense = bothedge = 0;
        switch (flow_type) {
        case IRQ_TYPE_EDGE_BOTH:
                bothedge = 1;
                break;

        case IRQ_TYPE_EDGE_RISING:
                sense = 1;
                break;

        case IRQ_TYPE_EDGE_FALLING:
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                levelsense = 1;
                sense = 1;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                levelsense = 1;
                break;

        default:
                pr_err("bogus flow type combination given !\n");
                return -EINVAL;
        }

        regaddr = get_ext_irq_perf_reg(irq);
        spin_lock_irqsave(&epic_lock, flags);
        reg = bcm_perf_readl(regaddr);
        irq %= 4;

        switch (bcm63xx_get_cpu_id()) {
        case BCM6348_CPU_ID:
                if (levelsense)
                        reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
                else
                        reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
                if (sense)
                        reg |= EXTIRQ_CFG_SENSE_6348(irq);
                else
                        reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
                if (bothedge)
                        reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
                else
                        reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
                break;

        case BCM3368_CPU_ID:
        case BCM6328_CPU_ID:
        case BCM6338_CPU_ID:
        case BCM6345_CPU_ID:
        case BCM6358_CPU_ID:
        case BCM6362_CPU_ID:
        case BCM6368_CPU_ID:
                if (levelsense)
                        reg |= EXTIRQ_CFG_LEVELSENSE(irq);
                else
                        reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
                if (sense)
                        reg |= EXTIRQ_CFG_SENSE(irq);
                else
                        reg &= ~EXTIRQ_CFG_SENSE(irq);
                if (bothedge)
                        reg |= EXTIRQ_CFG_BOTHEDGE(irq);
                else
                        reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
                break;
        default:
                BUG();
        }

        bcm_perf_writel(reg, regaddr);
        spin_unlock_irqrestore(&epic_lock, flags);

        irqd_set_trigger_type(d, flow_type);
        if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                irq_set_handler_locked(d, handle_level_irq);
        else
                irq_set_handler_locked(d, handle_edge_irq);

        return IRQ_SET_MASK_OK_NOCOPY;
}

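/*
 * Changing affinity re-runs the unmask path with the new mask so that
 * every present CPU's mask register reflects the destination; an IRQ
 * that is currently disabled is left untouched until it is unmasked
 * again.
 */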
#ifdef CONFIG_SMP
static int bcm63xx_internal_set_affinity(struct irq_data *data,
                                         const struct cpumask *dest,
                                         bool force)
{
        if (!irqd_irq_disabled(data))
                internal_irq_unmask(data, dest);

        return 0;
}
#endif

static struct irq_chip bcm63xx_internal_irq_chip = {
        .name           = "bcm63xx_ipic",
        .irq_mask       = bcm63xx_internal_irq_mask,
        .irq_unmask     = bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
        .name           = "bcm63xx_epic",
        .irq_ack        = bcm63xx_external_irq_clear,

        .irq_mask       = bcm63xx_external_irq_mask,
        .irq_unmask     = bcm63xx_external_irq_unmask,

        .irq_set_type   = bcm63xx_external_irq_set_type,
};

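/*
 * The cascade inputs only need to be claimed so that the corresponding
 * MIPS CPU interrupt lines stay enabled; the real decoding is done in
 * plat_irq_dispatch(), hence the no_action dummy handlers.
 */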
static struct irqaction cpu_ip2_cascade_action = {
        .handler        = no_action,
        .name           = "cascade_ip2",
        .flags          = IRQF_NO_THREAD,
};

#ifdef CONFIG_SMP
static struct irqaction cpu_ip3_cascade_action = {
        .handler        = no_action,
        .name           = "cascade_ip3",
        .flags          = IRQF_NO_THREAD,
};
#endif

static struct irqaction cpu_ext_cascade_action = {
        .handler        = no_action,
        .name           = "cascade_extirq",
        .flags          = IRQF_NO_THREAD,
};

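/*
 * Probe the SoC type, compute the per-CPU status/mask register
 * addresses, record how (and whether) the external lines are cascaded,
 * and select the 32- or 64-bit helpers generated by
 * BUILD_IPIC_INTERNAL().
 */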
static void bcm63xx_init_irq(void)
{
        int irq_bits;

        irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
        irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
        irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
        irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

        switch (bcm63xx_get_cpu_id()) {
        case BCM3368_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
                irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
                irq_stat_addr[1] = 0;
                irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
                break;
        case BCM6328_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
                irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
                irq_bits = 64;
                ext_irq_count = 4;
                is_ext_irq_cascaded = 1;
                ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
                ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
                break;
        case BCM6338_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
                irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
                irq_stat_addr[1] = 0;
                irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
                break;
        case BCM6345_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
                irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
                irq_stat_addr[1] = 0;
                irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
                break;
        case BCM6348_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
                irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
                irq_stat_addr[1] = 0;
                irq_mask_addr[1] = 0;
                irq_bits = 32;
                ext_irq_count = 4;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
                break;
        case BCM6358_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
                irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
                irq_bits = 32;
                ext_irq_count = 4;
                is_ext_irq_cascaded = 1;
                ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
                ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
                break;
        case BCM6362_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
                irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
                irq_bits = 64;
                ext_irq_count = 4;
                is_ext_irq_cascaded = 1;
                ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
                ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
                break;
        case BCM6368_CPU_ID:
                irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
                irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
                irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
                irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
                irq_bits = 64;
                ext_irq_count = 6;
                is_ext_irq_cascaded = 1;
                ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
                ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
                ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
                ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
                break;
        default:
                BUG();
        }

        if (irq_bits == 32) {
                dispatch_internal = __dispatch_internal_32;
                internal_irq_mask = __internal_irq_mask_32;
                internal_irq_unmask = __internal_irq_unmask_32;
        } else {
                dispatch_internal = __dispatch_internal_64;
                internal_irq_mask = __internal_irq_mask_64;
                internal_irq_unmask = __internal_irq_unmask_64;
        }
}

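/*
 * Register the MIPS CPU interrupt controller, attach the internal and
 * external irq_chips to their IRQ ranges, then claim the cascade
 * inputs: IP2 always, IP3..IP6 for the external lines on non-cascaded
 * SoCs, and on SMP/cascaded SoCs IP3 for the second CPU's controller
 * (with the default affinity restricted to the boot CPU).
 */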
void __init arch_init_irq(void)
{
        int i;

        bcm63xx_init_irq();
        mips_cpu_irq_init();
        for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
                irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
                                         handle_level_irq);

        for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
                irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
                                         handle_edge_irq);

        if (!is_ext_irq_cascaded) {
                for (i = 3; i < 3 + ext_irq_count; ++i)
                        setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
        }

        setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
#ifdef CONFIG_SMP
        if (is_ext_irq_cascaded) {
                setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
                bcm63xx_internal_irq_chip.irq_set_affinity =
                        bcm63xx_internal_set_affinity;

                cpumask_clear(irq_default_affinity);
                cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
        }
#endif
}
